source: src/linux/universal/linux-4.4/kernel/trace/ftrace.c @ 31885

Last change on this file since 31885 was 31885, checked in by brainslayer, 5 weeks ago

update

File size: 138.0 KB
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })
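
/*
 * Both macros are statement expressions that evaluate to the tested
 * condition, so a caller can report the anomaly, disable ftrace, and
 * bail out in one step, as __ftrace_hash_rec_update() does below:
 *
 *        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
 *                return;
 */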

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)  \
        .func_hash              = &opsname.local_hash,                  \
        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
        .func_hash              = val, \
        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
        INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
        struct list_head list;
        struct pid *pid;
};

static bool ftrace_pids_enabled(void)
{
        return !list_empty(&ftrace_pids);
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
                                   struct ftrace_ops *op, struct pt_regs *regs);

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)                 \
        op = rcu_dereference_raw_notrace(list);                 \
        do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)                            \
        while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
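
/*
 * Typical usage of this pair, as in ftrace_update_pid_func() below:
 *
 *        do_for_each_ftrace_op(op, ftrace_ops_list) {
 *                ...
 *        } while_for_each_ftrace_op(op);
 */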

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->local_hash.regex_lock);
                ops->func_hash = &ops->local_hash;
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
        struct ftrace_ops *ops;
        int cnt = 0;

        mutex_lock(&ftrace_lock);

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next)
                cnt++;

        mutex_unlock(&ftrace_lock);

        return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
{
        if (!test_tsk_trace_trace(current))
                return;

        op->saved_func(ip, parent_ip, op, regs);
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag before tracing fully stops.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
        int cpu;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
        int __percpu *disabled;

        disabled = alloc_percpu(int);
        if (!disabled)
                return -ENOMEM;

        ops->disabled = disabled;
        control_ops_disable_all(ops);
        return 0;
}

static void ftrace_sync(struct work_struct *work)
{
        /*
         * This function is just a stub to implement a hard force
         * of synchronize_sched(). This requires synchronizing
         * tasks even in userspace and idle.
         *
         * Yes, function tracing is rude.
         */
}

static void ftrace_sync_ipi(void *data)
{
        /* Probably not needed, but do it anyway */
        smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif


static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
        /*
         * If this is a dynamic ops or we force list func,
         * then it needs to call the list anyway.
         */
        if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
                return ftrace_ops_list_func;

        return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        /*
         * Prepare the ftrace_ops that the arch callback will use.
         * If there's only one ftrace_ops registered, the ftrace_ops_list
         * will point to the ops we want.
         */
        set_function_trace_op = ftrace_ops_list;

        /* If there's no ftrace_ops registered, just call the stub function */
        if (ftrace_ops_list == &ftrace_list_end) {
                func = ftrace_stub;

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        } else if (ftrace_ops_list->next == &ftrace_list_end) {
                func = ftrace_ops_get_list_func(ftrace_ops_list);

        } else {
                /* Just use the default ftrace_ops */
                set_function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        update_function_graph_func();

        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;

        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
         */
        if (func == ftrace_ops_list_func) {
                ftrace_trace_function = func;
                /*
                 * Don't even bother setting function_trace_ops,
                 * it would be racy to do so anyway.
                 */
                return;
        }

#ifndef CONFIG_DYNAMIC_FTRACE
        /*
         * For static tracing, we need to be a bit more careful.
         * The function change takes effect immediately. Thus,
         * we need to coordinate the setting of the function_trace_ops
         * with the setting of the ftrace_trace_function.
         *
         * Set the function to the list ops, which will call the
         * function we want, albeit indirectly, but it handles the
         * ftrace_ops and doesn't depend on function_trace_op.
         */
        ftrace_trace_function = ftrace_ops_list_func;
        /*
         * Make sure all CPUs see this. Yes this is slow, but static
         * tracing is slow and nasty to have enabled.
         */
        schedule_on_each_cpu(ftrace_sync);
        /* Now all cpus are using the list ops. */
        function_trace_op = set_function_trace_op;
        /* Make sure the function_trace_op is visible on all CPUs */
        smp_wmb();
        /* Nasty way to force a rmb on all cpus */
        smp_call_function(ftrace_sync_ipi, NULL, 1);
        /* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

        ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
        return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        ops->next = *list;
        /*
         * We are entering ops into the list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the list.
         */
        rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (*list == ops && ops->next == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
                                struct ftrace_ops *main_ops,
                                struct ftrace_ops *ops)
{
        int first = *list == &ftrace_list_end;
        add_ftrace_ops(list, ops);
        if (first)
                add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
                                  struct ftrace_ops *main_ops,
                                  struct ftrace_ops *ops)
{
        int ret = remove_ftrace_ops(list, ops);
        if (!ret && *list == &ftrace_list_end)
                ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
        return ret;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                if (control_ops_alloc(ops))
                        return -ENOMEM;
                add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
                /* The control_ops needs the trampoline update */
                ops = &control_ops;
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);

        /* Always save the function, and reset at unregistering */
        ops->saved_func = ops->func;

        if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
                ops->func = ftrace_pid_func;

        ftrace_update_trampoline(ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        if (ops->flags & FTRACE_OPS_FL_CONTROL) {
                ret = remove_ftrace_list_ops(&ftrace_control_list,
                                             &control_ops, ops);
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        ops->func = ops->saved_func;

        return 0;
}

static void ftrace_update_pid_func(void)
{
        bool enabled = ftrace_pids_enabled();
        struct ftrace_ops *op;

        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (op->flags & FTRACE_OPS_FL_PID) {
                        op->func = enabled ? ftrace_pid_func :
                                op->saved_func;
                        ftrace_update_trampoline(op);
                }
        } while_for_each_ftrace_op(op);

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
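
/*
 * For scale (sizes vary by arch and config): on a typical 64-bit build
 * with 4K pages and the graph tracer enabled, sizeof(struct
 * ftrace_profile) is 48 bytes and the page header is 16 bytes, giving
 * (4096 - 16) / 48 = 85 records per page.
 */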

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;
        else
                return 0;
}
#else
/* without function graph, compare on hit counts */
static int function_stat_cmp(void *p1, void *p2)
{
        struct ftrace_profile *a = p1;
        struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;
        else
                return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "  Function                               "
                 "Hit    Time            Avg             s^2\n"
                    "  --------                               "
                 "---    ----            ---             ---\n");
#else
        seq_puts(m, "  Function                               Hit\n"
                    "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        avg = rec->time;
        do_div(avg, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
#endif

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "    ");

        /* Sample variance (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                /*
                 * Compute the sample variance:
                 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
                 */
                stddev = rec->counter * rec->time_squared -
                         rec->time * rec->time;

                /*
                 * Divide only by 1000 for ns^2 -> us^2 conversion.
                 * trace_print_graph_duration will divide 1000 again.
                 */
                do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. With past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        function_profile_call(trace->func, 0, NULL, NULL);
        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zero'd, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!fgraph_graph_time) {
                int index;

                index = trace->depth;

                /* Append this call time to the parent time to subtract */
                if (index)
                        current->ret_stack[index - 1].subtime += calltime;

                if (current->ret_stack[index].subtime < calltime)
                        calltime -= current->ret_stack[index].subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&profile_graph_return,
                                     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
        INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler calls stop_machine,
                         * so this acts like a synchronize_sched().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                /* allocate enough for function name + cpu number */
                name = kmalloc(32, GFP_KERNEL);
                if (!name) {
                        /*
                         * The files created are permanent, if something happens
                         * we still do not free memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                snprintf(name, 32, "function%d", cpu);
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = tracefs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warning("Could not create tracefs "
                           "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct list_head        free_list;
};

struct ftrace_func_entry {
        struct hlist_node hlist;
        unsigned long ip;
};

struct ftrace_hash {
        unsigned long           size_bits;
        struct hlist_head       *buckets;
        unsigned long           count;
        struct rcu_head         rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
        .func                           = ftrace_stub,
        .local_hash.notrace_hash        = EMPTY_HASH,
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
                                          FTRACE_OPS_FL_INITIALIZED |
                                          FTRACE_OPS_FL_PID,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
        struct ftrace_ops *op;
        bool ret = false;

        /*
         * Some of the ops may be dynamically allocated,
         * they are freed after a synchronize_sched().
         */
        preempt_disable_notrace();

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                /*
                 * This is to check for dynamically allocated trampolines.
                 * Trampolines that are in kernel text will have
                 * core_kernel_text() return true.
                 */
                if (op->trampoline && op->trampoline_size)
                        if (addr >= op->trampoline &&
                            addr < op->trampoline + op->trampoline_size) {
                                ret = true;
                                goto out;
                        }
        } while_for_each_ftrace_op(op);

 out:
        preempt_enable_notrace();

        return ret;
}

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
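
/*
 * For scale: struct dyn_ftrace is an ip plus a flags word (plus any
 * arch-specific fields), so on a typical 64-bit build where struct
 * dyn_arch_ftrace is empty, that is 16 bytes per entry, or 256 entries
 * per 4K page.
 */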

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
        return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;

        if (ftrace_hash_empty(hash))
                return NULL;

        if (hash->size_bits > 0)
                key = hash_long(ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        if (hash->size_bits)
                key = hash_long(entry->ip, hash->size_bits);
        else
                key = 0;

        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

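/*
 * Unlike free_hash_entry(), this leaves the entry allocated so that it
 * can be re-inserted into another hash (see ftrace_hash_move() below).
 */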
static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        ftrace_ops_init(ops);
        free_ftrace_hash(ops->func_hash->filter_hash);
        free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
                                       struct ftrace_hash *new_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_func_entry *entry;
        struct hlist_node *tn;
        struct hlist_head *hhd;
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
        int ret;
        int i;

        /* Reject setting notrace hash on IPMODIFY ftrace_ops */
        if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
                return -EINVAL;

        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
                new_hash = EMPTY_HASH;
                goto update;
        }

        /*
         * Make the hash size about 1/2 the # found
         */
        for (size /= 2; size; size >>= 1)
                bits++;
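        /* e.g. src->count == 100: bits ends up 6, i.e. 64 buckets */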

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                return -ENOMEM;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }

update:
        /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
        if (enable) {
                /* IPMODIFY should be updated only when filter_hash updating */
                ret = ftrace_hash_ipmodify_update(ops, new_hash);
                if (ret < 0) {
                        free_ftrace_hash(new_hash);
                        return ret;
                }
        }

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable_modify(ops, enable);

        rcu_assign_pointer(*dst, new_hash);

        ftrace_hash_rec_enable_modify(ops, enable);

        return 0;
}

static bool hash_contains_ip(unsigned long ip,
                             struct ftrace_ops_hash *hash)
{
        /*
         * The function record is a match if it exists in the filter
         * hash and not in the notrace hash. Note, an empty hash is
         * considered a match for the filter hash, but an empty
         * notrace hash is considered not in the notrace hash.
         */
        return (ftrace_hash_empty(hash->filter_hash) ||
                ftrace_lookup_ip(hash->filter_hash, ip)) &&
                (ftrace_hash_empty(hash->notrace_hash) ||
                 !ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
        struct ftrace_ops_hash hash;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * There's a small race when adding ops that the ftrace handler
         * that wants regs, may be called without them. We cannot
         * allow that handler to be called if regs is NULL.
         */
        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
                return 0;
#endif

        hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
        hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

        if (hash_contains_ip(ip, &hash))
                ret = 1;
        else
                ret = 0;

        return ret;
}

/*
 * This is a double for loop. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }


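/*
 * Compare a search key against a mcount record.  key->ip holds the
 * start of the range being searched and key->flags is overloaded to
 * hold the end (see ftrace_location_range() below), so a record
 * matches when its MCOUNT_INSN_SIZE-byte call site overlaps the range.
 */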
static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->flags < rec->ip)
                return -1;
        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
                return 1;
        return 0;
}

static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        struct dyn_ftrace key;

        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        return rec->ip;
        }

        return 0;
}

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
        return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
        unsigned long ret;

        ret = ftrace_location_range((unsigned long)start,
                                    (unsigned long)end);

        return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
        struct ftrace_ops *ops;
        bool keep_regs = false;

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next) {
                /* pass rec in as regs to have non-NULL val */
                if (ftrace_ops_test(ops, rec->ip, rec)) {
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                                keep_regs = true;
                                break;
                        }
                }
        }

        return keep_regs;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int count = 0;
        int all = 0;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
         */
        if (filter_hash) {
                hash = ops->func_hash->filter_hash;
                other_hash = ops->func_hash->notrace_hash;
                if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
                hash = ops->func_hash->notrace_hash;
                other_hash = ops->func_hash->filter_hash;
                /*
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
                if (ftrace_hash_empty(hash))
                        return;
        }

        do_for_each_ftrace_rec(pg, rec) {
                int in_other_hash = 0;
                int in_hash = 0;
                int match = 0;

                if (all) {
                        /*
                         * Only the filter_hash affects all records.
                         * Update if the record is not in the notrace hash.
                         */
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

                        /*
                         * If filter_hash is set, we want to match all functions
                         * that are in the hash but not in the other hash.
                         *
                         * If filter_hash is not set, then we are decrementing.
                         * That means we match anything that is in the hash
                         * and also in the other_hash. That is, we need to turn
                         * off functions in the other hash because they are disabled
                         * by this hash.
                         */
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
                                 (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
                        continue;

                if (inc) {
                        rec->flags++;
                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
                                return;

                        /*
                         * If there's only a single callback registered to a
                         * function, and the ops has a trampoline registered
                         * for it, then we can call it directly.
                         */
                        if (ftrace_rec_count(rec) == 1 && ops->trampoline)
                                rec->flags |= FTRACE_FL_TRAMP;
                        else
                                /*
                                 * If we are adding another function callback
                                 * to this function, and the previous had a
                                 * custom trampoline in use, then we need to go
                                 * back to the default trampoline.
                                 */
                                rec->flags &= ~FTRACE_FL_TRAMP;

                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
                         */
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
                                return;
                        rec->flags--;

                        /*
                         * If the rec had REGS enabled and the ops that is
                         * being removed had REGS set, then see if there is
                         * still any ops for this record that wants regs.
                         * If not, we can stop recording them.
                         */
                        if (ftrace_rec_count(rec) > 0 &&
                            rec->flags & FTRACE_FL_REGS &&
                            ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                                if (!test_rec_ops_needs_regs(rec))
                                        rec->flags &= ~FTRACE_FL_REGS;
                        }

                        /*
                         * If the rec had TRAMP enabled, then it needs to
1767         * be cleared. As TRAMP can only be enabled if
1768         * there is only a single ops attached to it,
1769         * always disable it on decrementing.
1770                         * In the future, we may set it if rec count is
1771                         * decremented to one, and the ops that is left
1772                         * has a trampoline.
1773                         */
1774                        rec->flags &= ~FTRACE_FL_TRAMP;
1775
1776                        /*
1777                         * flags will be cleared in ftrace_check_record()
1778                         * if rec count is zero.
1779                         */
1780                }
1781                count++;
1782                /* Shortcut, if we handled all records, we are done. */
1783                if (!all && count == hash->count)
1784                        return;
1785        } while_for_each_ftrace_rec();
1786}
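
/*
 * Illustrative examples of the matching rules above, for a call with
 * filter_hash == 1 (a sketch, not executable code):
 *
 *   filter_hash = { schedule }, notrace_hash = empty, inc = true
 *     -> only schedule()'s record gets rec->flags++.
 *
 *   filter_hash = empty, notrace_hash = { schedule }, inc = true
 *     -> "all" mode: every record except schedule()'s gets rec->flags++.
 */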
1787
1788static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1789                                    int filter_hash)
1790{
1791        __ftrace_hash_rec_update(ops, filter_hash, 0);
1792}
1793
1794static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1795                                   int filter_hash)
1796{
1797        __ftrace_hash_rec_update(ops, filter_hash, 1);
1798}
1799
1800static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1801                                          int filter_hash, int inc)
1802{
1803        struct ftrace_ops *op;
1804
1805        __ftrace_hash_rec_update(ops, filter_hash, inc);
1806
1807        if (ops->func_hash != &global_ops.local_hash)
1808                return;
1809
1810        /*
1811         * If the ops shares the global_ops hash, then we need to update
1812         * all ops that are enabled and use this hash.
1813         */
1814        do_for_each_ftrace_op(op, ftrace_ops_list) {
1815                /* Already done */
1816                if (op == ops)
1817                        continue;
1818                if (op->func_hash == &global_ops.local_hash)
1819                        __ftrace_hash_rec_update(op, filter_hash, inc);
1820        } while_for_each_ftrace_op(op);
1821}
1822
1823static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1824                                           int filter_hash)
1825{
1826        ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1827}
1828
1829static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1830                                          int filter_hash)
1831{
1832        ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1833}
1834
1835/*
1836 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1837 * or no update is needed, -EBUSY if it detects a conflict of the flag
1838 * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1839 * Note that old_hash and new_hash have the following meanings:
1840 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1841 *  - If the hash is EMPTY_HASH, it hits nothing
1842 *  - Anything else hits the recs which match the hash entries.
1843 */
1844static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1845                                         struct ftrace_hash *old_hash,
1846                                         struct ftrace_hash *new_hash)
1847{
1848        struct ftrace_page *pg;
1849        struct dyn_ftrace *rec, *end = NULL;
1850        int in_old, in_new;
1851
1852        /* Only update if the ops has been registered */
1853        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1854                return 0;
1855
1856        if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1857                return 0;
1858
1859        /*
1860         * Since the IPMODIFY is a very address sensitive action, we do not
1861         * allow ftrace_ops to set all functions to new hash.
1862         */
1863        if (!new_hash || !old_hash)
1864                return -EINVAL;
1865
1866        /* Update rec->flags */
1867        do_for_each_ftrace_rec(pg, rec) {
1868                /* We need to update only differences of filter_hash */
1869                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1870                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1871                if (in_old == in_new)
1872                        continue;
1873
1874                if (in_new) {
1875                        /* New entries must ensure no others are using it */
1876                        if (rec->flags & FTRACE_FL_IPMODIFY)
1877                                goto rollback;
1878                        rec->flags |= FTRACE_FL_IPMODIFY;
1879                } else /* Removed entry */
1880                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1881        } while_for_each_ftrace_rec();
1882
1883        return 0;
1884
1885rollback:
1886        end = rec;
1887
1888        /* Roll back what we did above */
1889        do_for_each_ftrace_rec(pg, rec) {
1890                if (rec == end)
1891                        goto err_out;
1892
1893                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1894                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1895                if (in_old == in_new)
1896                        continue;
1897
1898                if (in_new)
1899                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1900                else
1901                        rec->flags |= FTRACE_FL_IPMODIFY;
1902        } while_for_each_ftrace_rec();
1903
1904err_out:
1905        return -EBUSY;
1906}
1907
1908static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1909{
1910        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1911
1912        if (ftrace_hash_empty(hash))
1913                hash = NULL;
1914
1915        return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1916}
1917
1918/* Disabling always succeeds */
1919static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1920{
1921        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1922
1923        if (ftrace_hash_empty(hash))
1924                hash = NULL;
1925
1926        __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1927}
1928
1929static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1930                                       struct ftrace_hash *new_hash)
1931{
1932        struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1933
1934        if (ftrace_hash_empty(old_hash))
1935                old_hash = NULL;
1936
1937        if (ftrace_hash_empty(new_hash))
1938                new_hash = NULL;
1939
1940        return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1941}
1942
1943static void print_ip_ins(const char *fmt, unsigned char *p)
1944{
1945        int i;
1946
1947        printk(KERN_CONT "%s", fmt);
1948
1949        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1950                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1951}
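
/*
 * For example, on x86-64, where MCOUNT_INSN_SIZE is 5, a call site that
 * currently holds the 5-byte NOP would be dumped as something like
 * " actual: 0f:1f:44:00:00" (illustrative bytes; the exact encoding is
 * arch specific).
 */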
1952
1953static struct ftrace_ops *
1954ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1955
1956/**
1957 * ftrace_bug - report and shutdown function tracer
1958 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1959 * @rec: The record that failed
1960 *
1961 * The arch code that enables or disables the function tracing
1962 * can call ftrace_bug() when it has detected a problem in
1963 * modifying the code. @failed should be one of either:
1964 * EFAULT - if the problem happens on reading the @ip address
1965 * EINVAL - if what is read at @ip is not what was expected
1966 * EPERM - if the problem happens on writing to the @ip address
1967 */
1968void ftrace_bug(int failed, struct dyn_ftrace *rec)
1969{
1970        unsigned long ip = rec ? rec->ip : 0;
1971
1972        switch (failed) {
1973        case -EFAULT:
1974                FTRACE_WARN_ON_ONCE(1);
1975                pr_info("ftrace faulted on modifying ");
1976                print_ip_sym(ip);
1977                break;
1978        case -EINVAL:
1979                FTRACE_WARN_ON_ONCE(1);
1980                pr_info("ftrace failed to modify ");
1981                print_ip_sym(ip);
1982                print_ip_ins(" actual: ", (unsigned char *)ip);
1983                pr_cont("\n");
1984                break;
1985        case -EPERM:
1986                FTRACE_WARN_ON_ONCE(1);
1987                pr_info("ftrace faulted on writing ");
1988                print_ip_sym(ip);
1989                break;
1990        default:
1991                FTRACE_WARN_ON_ONCE(1);
1992                pr_info("ftrace faulted on unknown error ");
1993                print_ip_sym(ip);
1994        }
1995        if (rec) {
1996                struct ftrace_ops *ops = NULL;
1997
1998                pr_info("ftrace record flags: %lx\n", rec->flags);
1999                pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2000                        rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2001                if (rec->flags & FTRACE_FL_TRAMP_EN) {
2002                        ops = ftrace_find_tramp_ops_any(rec);
2003                        if (ops)
2004                                pr_cont("\ttramp: %pS",
2005                                        (void *)ops->trampoline);
2006                        else
2007                                pr_cont("\ttramp: ERROR!");
2008
2009                }
2010                ip = ftrace_get_addr_curr(rec);
2011                pr_cont(" expected tramp: %lx\n", ip);
2012        }
2013}
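
/*
 * The typical call pattern, as used by the code-patching paths later in
 * this file (a sketch):
 *
 *	ret = ftrace_make_call(rec, ftrace_addr);
 *	if (ret)
 *		ftrace_bug(ret, rec);
 */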
2014
2015static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2016{
2017        unsigned long flag = 0UL;
2018
2019        /*
2020         * If we are updating calls:
2021         *
2022         *   If the record has a ref count, then we need to enable it
2023         *   because someone is using it.
2024         *
2025         *   Otherwise we make sure it's disabled.
2026         *
2027         * If we are disabling calls, then disable all records that
2028         * are enabled.
2029         */
2030        if (enable && ftrace_rec_count(rec))
2031                flag = FTRACE_FL_ENABLED;
2032
2033        /*
2034         * If enabling and the REGS flag does not match the REGS_EN, or
2035         * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2036         * this record. Set flags to fail the compare against ENABLED.
2037         */
2038        if (flag) {
2039                if (!(rec->flags & FTRACE_FL_REGS) !=
2040                    !(rec->flags & FTRACE_FL_REGS_EN))
2041                        flag |= FTRACE_FL_REGS;
2042
2043                if (!(rec->flags & FTRACE_FL_TRAMP) !=
2044                    !(rec->flags & FTRACE_FL_TRAMP_EN))
2045                        flag |= FTRACE_FL_TRAMP;
2046        }
2047
2048        /* If the state of this record hasn't changed, then do nothing */
2049        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2050                return FTRACE_UPDATE_IGNORE;
2051
2052        if (flag) {
2053                /* Save off if rec is being enabled (for return value) */
2054                flag ^= rec->flags & FTRACE_FL_ENABLED;
2055
2056                if (update) {
2057                        rec->flags |= FTRACE_FL_ENABLED;
2058                        if (flag & FTRACE_FL_REGS) {
2059                                if (rec->flags & FTRACE_FL_REGS)
2060                                        rec->flags |= FTRACE_FL_REGS_EN;
2061                                else
2062                                        rec->flags &= ~FTRACE_FL_REGS_EN;
2063                        }
2064                        if (flag & FTRACE_FL_TRAMP) {
2065                                if (rec->flags & FTRACE_FL_TRAMP)
2066                                        rec->flags |= FTRACE_FL_TRAMP_EN;
2067                                else
2068                                        rec->flags &= ~FTRACE_FL_TRAMP_EN;
2069                        }
2070                }
2071
2072                /*
2073                 * If this record is being updated from a nop, then
2074                 *   return UPDATE_MAKE_CALL.
2075                 * Otherwise,
2076                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2077                 *   from the save regs, to a non-save regs function or
2078                 *   vice versa, or from a trampoline call.
2079                 */
2080                if (flag & FTRACE_FL_ENABLED)
2081                        return FTRACE_UPDATE_MAKE_CALL;
2082
2083                return FTRACE_UPDATE_MODIFY_CALL;
2084        }
2085
2086        if (update) {
2087                /* If there's no more users, clear all flags */
2088                if (!ftrace_rec_count(rec))
2089                        rec->flags = 0;
2090                else
2091                        /*
2092                         * Just disable the record, but keep the ops TRAMP
2093                         * and REGS states. The _EN flags must be disabled though.
2094                         */
2095                        rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2096                                        FTRACE_FL_REGS_EN);
2097        }
2098
2099        return FTRACE_UPDATE_MAKE_NOP;
2100}
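
/*
 * Summary of the return values of ftrace_check_record(), derived from
 * the logic above:
 *
 *   FTRACE_UPDATE_IGNORE      - the record is already in the desired state
 *   FTRACE_UPDATE_MAKE_CALL   - convert a nop into a call
 *   FTRACE_UPDATE_MAKE_NOP    - convert a call back into a nop
 *   FTRACE_UPDATE_MODIFY_CALL - keep the call but switch between the
 *                               regs/non-regs callback or a trampoline
 */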
2101
2102/**
2103 * ftrace_update_record, set a record that now is tracing or not
2104 * @rec: the record to update
2105 * @enable: set to 1 if the record is tracing, zero to force disable
2106 *
2107 * The records that represent all functions that can be traced need
2108 * to be updated when tracing has been enabled.
2109 */
2110int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2111{
2112        return ftrace_check_record(rec, enable, 1);
2113}
2114
2115/**
2116 * ftrace_test_record, check if the record has been enabled or not
2117 * @rec: the record to test
2118 * @enable: set to 1 to check if enabled, 0 if it is disabled
2119 *
2120 * The arch code may need to test if a record is already set to
2121 * tracing to determine how to modify the function code that it
2122 * represents.
2123 */
2124int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2125{
2126        return ftrace_check_record(rec, enable, 0);
2127}
2128
2129static struct ftrace_ops *
2130ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2131{
2132        struct ftrace_ops *op;
2133        unsigned long ip = rec->ip;
2134
2135        do_for_each_ftrace_op(op, ftrace_ops_list) {
2136
2137                if (!op->trampoline)
2138                        continue;
2139
2140                if (hash_contains_ip(ip, op->func_hash))
2141                        return op;
2142        } while_for_each_ftrace_op(op);
2143
2144        return NULL;
2145}
2146
2147static struct ftrace_ops *
2148ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2149{
2150        struct ftrace_ops *op;
2151        unsigned long ip = rec->ip;
2152
2153        /*
2154         * Need to check removed ops first.
2155         * If they are being removed, and this rec has a tramp,
2156         * and this rec is in the ops list, then it would be the
2157         * one with the tramp.
2158         */
2159        if (removed_ops) {
2160                if (hash_contains_ip(ip, &removed_ops->old_hash))
2161                        return removed_ops;
2162        }
2163
2164        /*
2165         * Need to find the current trampoline for a rec.
2166         * Now, a trampoline is only attached to a rec if there
2167         * was a single 'ops' attached to it. But this can be called
2168         * when we are adding another op to the rec or removing the
2169         * current one. Thus, if the op is being added, we can
2170         * ignore it because it hasn't attached itself to the rec
2171         * yet.
2172         *
2173         * If an ops is being modified (hooking to different functions)
2174         * then we don't care about the new functions that are being
2175         * added, just the old ones (that are probably being removed).
2176         *
2177         * If we are adding an ops to a function that already uses
2178         * a trampoline, that trampoline needs to be removed (trampolines
2179         * only work with a single ops connected), so an ops that is
2180         * not being modified also needs to be checked.
2181         */
2182        do_for_each_ftrace_op(op, ftrace_ops_list) {
2183
2184                if (!op->trampoline)
2185                        continue;
2186
2187                /*
2188                 * If the ops is being added, it hasn't gotten to
2189                 * the point to be removed from this tree yet.
2190                 */
2191                if (op->flags & FTRACE_OPS_FL_ADDING)
2192                        continue;
2193
2195                /*
2196                 * If the ops is being modified and is in the old
2197                 * hash, then it is probably being removed from this
2198                 * function.
2199                 */
2200                if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2201                    hash_contains_ip(ip, &op->old_hash))
2202                        return op;
2203                /*
2204                 * If the ops is not being added or modified, and it's
2205                 * in its normal filter hash, then this must be the one
2206                 * we want!
2207                 */
2208                if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2209                    hash_contains_ip(ip, op->func_hash))
2210                        return op;
2211
2212        } while_for_each_ftrace_op(op);
2213
2214        return NULL;
2215}
2216
2217static struct ftrace_ops *
2218ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2219{
2220        struct ftrace_ops *op;
2221        unsigned long ip = rec->ip;
2222
2223        do_for_each_ftrace_op(op, ftrace_ops_list) {
2225                if (hash_contains_ip(ip, op->func_hash))
2226                        return op;
2227        } while_for_each_ftrace_op(op);
2228
2229        return NULL;
2230}
2231
2232/**
2233 * ftrace_get_addr_new - Get the call address to set to
2234 * @rec:  The ftrace record descriptor
2235 *
2236 * If the record has the FTRACE_FL_REGS set, that means that it
2237 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2238 * is not set, then it wants to convert to the normal callback.
2239 *
2240 * Returns the address of the trampoline to set to
2241 */
2242unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2243{
2244        struct ftrace_ops *ops;
2245
2246        /* Trampolines take precedence over regs */
2247        if (rec->flags & FTRACE_FL_TRAMP) {
2248                ops = ftrace_find_tramp_ops_new(rec);
2249                if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2250                        pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2251                                (void *)rec->ip, (void *)rec->ip, rec->flags);
2252                        /* Ftrace is shutting down, return anything */
2253                        return (unsigned long)FTRACE_ADDR;
2254                }
2255                return ops->trampoline;
2256        }
2257
2258        if (rec->flags & FTRACE_FL_REGS)
2259                return (unsigned long)FTRACE_REGS_ADDR;
2260        else
2261                return (unsigned long)FTRACE_ADDR;
2262}
2263
2264/**
2265 * ftrace_get_addr_curr - Get the call address that is already there
2266 * @rec:  The ftrace record descriptor
2267 *
2268 * The FTRACE_FL_REGS_EN is set when the record already points to
2269 * a function that saves all the regs. Basically the '_EN' version
2270 * represents the current state of the function.
2271 *
2272 * Returns the address of the trampoline that is currently being called
2273 */
2274unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2275{
2276        struct ftrace_ops *ops;
2277
2278        /* Trampolines take precedence over regs */
2279        if (rec->flags & FTRACE_FL_TRAMP_EN) {
2280                ops = ftrace_find_tramp_ops_curr(rec);
2281                if (FTRACE_WARN_ON(!ops)) {
2282                        pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2283                                    (void *)rec->ip, (void *)rec->ip);
2284                        /* Ftrace is shutting down, return anything */
2285                        return (unsigned long)FTRACE_ADDR;
2286                }
2287                return ops->trampoline;
2288        }
2289
2290        if (rec->flags & FTRACE_FL_REGS_EN)
2291                return (unsigned long)FTRACE_REGS_ADDR;
2292        else
2293                return (unsigned long)FTRACE_ADDR;
2294}
2295
2296static int
2297__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2298{
2299        unsigned long ftrace_old_addr;
2300        unsigned long ftrace_addr;
2301        int ret;
2302
2303        ftrace_addr = ftrace_get_addr_new(rec);
2304
2305        /* This needs to be done before we call ftrace_update_record */
2306        ftrace_old_addr = ftrace_get_addr_curr(rec);
2307
2308        ret = ftrace_update_record(rec, enable);
2309
2310        switch (ret) {
2311        case FTRACE_UPDATE_IGNORE:
2312                return 0;
2313
2314        case FTRACE_UPDATE_MAKE_CALL:
2315                return ftrace_make_call(rec, ftrace_addr);
2316
2317        case FTRACE_UPDATE_MAKE_NOP:
2318                return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2319
2320        case FTRACE_UPDATE_MODIFY_CALL:
2321                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2322        }
2323
2324        return -1; /* unknown ftrace bug */
2325}
2326
2327void __weak ftrace_replace_code(int enable)
2328{
2329        struct dyn_ftrace *rec;
2330        struct ftrace_page *pg;
2331        int failed;
2332
2333        if (unlikely(ftrace_disabled))
2334                return;
2335
2336        do_for_each_ftrace_rec(pg, rec) {
2337                failed = __ftrace_replace_code(rec, enable);
2338                if (failed) {
2339                        ftrace_bug(failed, rec);
2340                        /* Stop processing */
2341                        return;
2342                }
2343        } while_for_each_ftrace_rec();
2344}
2345
2346struct ftrace_rec_iter {
2347        struct ftrace_page      *pg;
2348        int                     index;
2349};
2350
2351/**
2352 * ftrace_rec_iter_start, start up iterating over traced functions
2353 *
2354 * Returns an iterator handle that is used to iterate over all
2355 * the records that represent address locations where functions
2356 * are traced.
2357 *
2358 * May return NULL if no records are available.
2359 */
2360struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2361{
2362        /*
2363         * We only use a single iterator.
2364         * Protected by the ftrace_lock mutex.
2365         */
2366        static struct ftrace_rec_iter ftrace_rec_iter;
2367        struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2368
2369        iter->pg = ftrace_pages_start;
2370        iter->index = 0;
2371
2372        /* Could have empty pages */
2373        while (iter->pg && !iter->pg->index)
2374                iter->pg = iter->pg->next;
2375
2376        if (!iter->pg)
2377                return NULL;
2378
2379        return iter;
2380}
2381
2382/**
2383 * ftrace_rec_iter_next, get the next record to process.
2384 * @iter: The handle to the iterator.
2385 *
2386 * Returns the next iterator after the given iterator @iter.
2387 */
2388struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2389{
2390        iter->index++;
2391
2392        if (iter->index >= iter->pg->index) {
2393                iter->pg = iter->pg->next;
2394                iter->index = 0;
2395
2396                /* Could have empty pages */
2397                while (iter->pg && !iter->pg->index)
2398                        iter->pg = iter->pg->next;
2399        }
2400
2401        if (!iter->pg)
2402                return NULL;
2403
2404        return iter;
2405}
2406
2407/**
2408 * ftrace_rec_iter_record, get the record at the iterator location
2409 * @iter: The current iterator location
2410 *
2411 * Returns the record that the current @iter is at.
2412 */
2413struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2414{
2415        return &iter->pg->records[iter->index];
2416}
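
/*
 * The three iterator helpers above are meant to be used together. Arch
 * code that patches one record at a time typically loops like this
 * (a sketch; do_patch() is hypothetical, and ftrace_lock must be held):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		do_patch(rec);
 *	}
 */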
2417
2418static int
2419ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2420{
2421        int ret;
2422
2423        if (unlikely(ftrace_disabled))
2424                return 0;
2425
2426        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2427        if (ret) {
2428                ftrace_bug(ret, rec);
2429                return 0;
2430        }
2431        return 1;
2432}
2433
2434/*
2435 * archs can override this function if they must do something
2436 * before the code modification is performed.
2437 */
2438int __weak ftrace_arch_code_modify_prepare(void)
2439{
2440        return 0;
2441}
2442
2443/*
2444 * archs can override this function if they must do something
2445 * after the modifying code is performed.
2446 */
2447int __weak ftrace_arch_code_modify_post_process(void)
2448{
2449        return 0;
2450}
2451
2452void ftrace_modify_all_code(int command)
2453{
2454        int update = command & FTRACE_UPDATE_TRACE_FUNC;
2455        int err = 0;
2456
2457        /*
2458         * If the ftrace_caller calls a ftrace_ops func directly,
2459         * we need to make sure that it only traces functions it
2460         * expects to trace. When doing the switch of functions,
2461         * we need to update to the ftrace_ops_list_func first
2462         * before the transition between old and new calls are set,
2463         * as the ftrace_ops_list_func will check the ops hashes
2464         * to make sure each ops only traces the functions it
2465         * expects.
2466         */
2467        if (update) {
2468                err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2469                if (FTRACE_WARN_ON(err))
2470                        return;
2471        }
2472
2473        if (command & FTRACE_UPDATE_CALLS)
2474                ftrace_replace_code(1);
2475        else if (command & FTRACE_DISABLE_CALLS)
2476                ftrace_replace_code(0);
2477
2478        if (update && ftrace_trace_function != ftrace_ops_list_func) {
2479                function_trace_op = set_function_trace_op;
2480                smp_wmb();
2481                /* If irqs are disabled, we are in stop machine */
2482                if (!irqs_disabled())
2483                        smp_call_function(ftrace_sync_ipi, NULL, 1);
2484                err = ftrace_update_ftrace_func(ftrace_trace_function);
2485                if (FTRACE_WARN_ON(err))
2486                        return;
2487        }
2488
2489        if (command & FTRACE_START_FUNC_RET)
2490                err = ftrace_enable_ftrace_graph_caller();
2491        else if (command & FTRACE_STOP_FUNC_RET)
2492                err = ftrace_disable_ftrace_graph_caller();
2493        FTRACE_WARN_ON(err);
2494}
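
/*
 * @command is a bitmask, so callers combine the FTRACE_* command flags.
 * For instance, enabling all calls while switching the traced function
 * in one pass would look like (illustrative):
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC);
 */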
2495
2496static int __ftrace_modify_code(void *data)
2497{
2498        int *command = data;
2499
2500        ftrace_modify_all_code(*command);
2501
2502        return 0;
2503}
2504
2505/**
2506 * ftrace_run_stop_machine, go back to the stop machine method
2507 * @command: The command to tell ftrace what to do
2508 *
2509 * If an arch needs to fall back to the stop machine method, then
2510 * it can call this function.
2511 */
2512void ftrace_run_stop_machine(int command)
2513{
2514        stop_machine(__ftrace_modify_code, &command, NULL);
2515}
2516
2517/**
2518 * arch_ftrace_update_code, modify the code to trace or not trace
2519 * @command: The command that needs to be done
2520 *
2521 * Archs can override this function if they do not need to
2522 * run stop_machine() to modify code.
2523 */
2524void __weak arch_ftrace_update_code(int command)
2525{
2526        ftrace_run_stop_machine(command);
2527}
2528
2529static void ftrace_run_update_code(int command)
2530{
2531        int ret;
2532
2533        ret = ftrace_arch_code_modify_prepare();
2534        FTRACE_WARN_ON(ret);
2535        if (ret)
2536                return;
2537
2538        /*
2539         * By default we use stop_machine() to modify the code.
2540         * But archs can do whatever they want as long as it
2541         * is safe. The stop_machine() is the safest, but also
2542         * produces the most overhead.
2543         */
2544        arch_ftrace_update_code(command);
2545
2546        ret = ftrace_arch_code_modify_post_process();
2547        FTRACE_WARN_ON(ret);
2548}
2549
2550static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2551                                   struct ftrace_ops_hash *old_hash)
2552{
2553        ops->flags |= FTRACE_OPS_FL_MODIFYING;
2554        ops->old_hash.filter_hash = old_hash->filter_hash;
2555        ops->old_hash.notrace_hash = old_hash->notrace_hash;
2556        ftrace_run_update_code(command);
2557        ops->old_hash.filter_hash = NULL;
2558        ops->old_hash.notrace_hash = NULL;
2559        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2560}
2561
2562static ftrace_func_t saved_ftrace_func;
2563static int ftrace_start_up;
2564
2565void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2566{
2567}
2568
2569static void control_ops_free(struct ftrace_ops *ops)
2570{
2571        free_percpu(ops->disabled);
2572}
2573
2574static void ftrace_startup_enable(int command)
2575{
2576        if (saved_ftrace_func != ftrace_trace_function) {
2577                saved_ftrace_func = ftrace_trace_function;
2578                command |= FTRACE_UPDATE_TRACE_FUNC;
2579        }
2580
2581        if (!command || !ftrace_enabled)
2582                return;
2583
2584        ftrace_run_update_code(command);
2585}
2586
2587static void ftrace_startup_all(int command)
2588{
2589        update_all_ops = true;
2590        ftrace_startup_enable(command);
2591        update_all_ops = false;
2592}
2593
2594static int ftrace_startup(struct ftrace_ops *ops, int command)
2595{
2596        int ret;
2597
2598        if (unlikely(ftrace_disabled))
2599                return -ENODEV;
2600
2601        ret = __register_ftrace_function(ops);
2602        if (ret)
2603                return ret;
2604
2605        ftrace_start_up++;
2606        command |= FTRACE_UPDATE_CALLS;
2607
2608        /*
2609         * Note that ftrace probes use this to start up
2610         * and modify the functions they will probe. But we still
2611         * set the ADDING flag for modification, as probes
2612         * do not have trampolines. If they add them in the
2613         * future, then the probes will need to distinguish
2614         * between adding and updating probes.
2615         */
2616        ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2617
2618        ret = ftrace_hash_ipmodify_enable(ops);
2619        if (ret < 0) {
2620                /* Rollback registration process */
2621                __unregister_ftrace_function(ops);
2622                ftrace_start_up--;
2623                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2624                return ret;
2625        }
2626
2627        ftrace_hash_rec_enable(ops, 1);
2628
2629        ftrace_startup_enable(command);
2630
2631        ops->flags &= ~FTRACE_OPS_FL_ADDING;
2632
2633        return 0;
2634}
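
/*
 * Tracers do not call ftrace_startup() directly; they register an ops
 * with register_ftrace_function() (defined later in this file), which
 * ends up here. A minimal user looks roughly like this (my_func and
 * my_ops are hypothetical names):
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip,
 *			    struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		... per-call work ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */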
2635
2636static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2637{
2638        int ret;
2639
2640        if (unlikely(ftrace_disabled))
2641                return -ENODEV;
2642
2643        ret = __unregister_ftrace_function(ops);
2644        if (ret)
2645                return ret;
2646
2647        ftrace_start_up--;
2648        /*
2649         * Just warn in case of an imbalance; no need to kill ftrace, it's not
2650         * critical, but the ftrace_call callers may never be nopped again after
2651         * further ftrace uses.
2652         */
2653        WARN_ON_ONCE(ftrace_start_up < 0);
2654
2655        /* Disabling ipmodify never fails */
2656        ftrace_hash_ipmodify_disable(ops);
2657        ftrace_hash_rec_disable(ops, 1);
2658
2659        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2660
2661        command |= FTRACE_UPDATE_CALLS;
2662
2663        if (saved_ftrace_func != ftrace_trace_function) {
2664                saved_ftrace_func = ftrace_trace_function;
2665                command |= FTRACE_UPDATE_TRACE_FUNC;
2666        }
2667
2668        if (!command || !ftrace_enabled) {
2669                /*
2670                 * If these are control ops, they still need their
2671                 * per_cpu field freed. Since function tracing is
2672                 * not currently active, we can just free them
2673                 * without synchronizing all CPUs.
2674                 */
2675                if (ops->flags & FTRACE_OPS_FL_CONTROL)
2676                        control_ops_free(ops);
2677                return 0;
2678        }
2679
2680        /*
2681         * If the ops uses a trampoline, then it needs to be
2682         * tested first on update.
2683         */
2684        ops->flags |= FTRACE_OPS_FL_REMOVING;
2685        removed_ops = ops;
2686
2687        /* The trampoline logic checks the old hashes */
2688        ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2689        ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2690
2691        ftrace_run_update_code(command);
2692
2693        /*
2694         * If there's no more ops registered with ftrace, run a
2695         * sanity check to make sure all rec flags are cleared.
2696         */
2697        if (ftrace_ops_list == &ftrace_list_end) {
2698                struct ftrace_page *pg;
2699                struct dyn_ftrace *rec;
2700
2701                do_for_each_ftrace_rec(pg, rec) {
2702                        if (FTRACE_WARN_ON_ONCE(rec->flags))
2703                                pr_warn("  %pS flags:%lx\n",
2704                                        (void *)rec->ip, rec->flags);
2705                } while_for_each_ftrace_rec();
2706        }
2707
2708        ops->old_hash.filter_hash = NULL;
2709        ops->old_hash.notrace_hash = NULL;
2710
2711        removed_ops = NULL;
2712        ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2713
2714        /*
2715         * Dynamic ops may be freed, we must make sure that all
2716         * callers are done before leaving this function.
2717         * The same goes for freeing the per_cpu data of the control
2718         * ops.
2719         *
2720         * Again, normal synchronize_sched() is not good enough.
2721         * We need to do a hard force of sched synchronization.
2722         * This is because we use preempt_disable() to do RCU, but
2723         * the function tracers can be called where RCU is not watching
2724         * (like before user_exit()). We can not rely on the RCU
2725         * infrastructure to do the synchronization, thus we must do it
2726         * ourselves.
2727         */
2728        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2729                schedule_on_each_cpu(ftrace_sync);
2730
2731                arch_ftrace_trampoline_free(ops);
2732
2733                if (ops->flags & FTRACE_OPS_FL_CONTROL)
2734                        control_ops_free(ops);
2735        }
2736
2737        return 0;
2738}
2739
2740static void ftrace_startup_sysctl(void)
2741{
2742        int command;
2743
2744        if (unlikely(ftrace_disabled))
2745                return;
2746
2747        /* Force update next time */
2748        saved_ftrace_func = NULL;
2749        /* ftrace_start_up is true if we want ftrace running */
2750        if (ftrace_start_up) {
2751                command = FTRACE_UPDATE_CALLS;
2752                if (ftrace_graph_active)
2753                        command |= FTRACE_START_FUNC_RET;
2754                ftrace_startup_enable(command);
2755        }
2756}
2757
2758static void ftrace_shutdown_sysctl(void)
2759{
2760        int command;
2761
2762        if (unlikely(ftrace_disabled))
2763                return;
2764
2765        /* ftrace_start_up is true if ftrace is running */
2766        if (ftrace_start_up) {
2767                command = FTRACE_DISABLE_CALLS;
2768                if (ftrace_graph_active)
2769                        command |= FTRACE_STOP_FUNC_RET;
2770                ftrace_run_update_code(command);
2771        }
2772}
2773
2774static cycle_t          ftrace_update_time;
2775unsigned long           ftrace_update_tot_cnt;
2776
2777static inline int ops_traces_mod(struct ftrace_ops *ops)
2778{
2779        /*
2780         * An empty filter_hash defaults to tracing the whole module.
2781         * But the notrace hash requires a test of individual module functions.
2782         */
2783        return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2784                ftrace_hash_empty(ops->func_hash->notrace_hash);
2785}
2786
2787/*
2788 * Check if the current ops references the record.
2789 *
2790 * If the ops traces all functions, then it was already accounted for.
2791 * If the ops does not trace the current record function, skip it.
2792 * If the ops ignores the function via notrace filter, skip it.
2793 */
2794static inline bool
2795ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2796{
2797        /* If ops isn't enabled, ignore it */
2798        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2799                return 0;
2800
2801        /* If ops traces all mods, we already accounted for it */
2802        if (ops_traces_mod(ops))
2803                return 0;
2804
2805        /* The function must be in the filter */
2806        if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2807            !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2808                return 0;
2809
2810        /* If in notrace hash, we ignore it too */
2811        if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2812                return 0;
2813
2814        return 1;
2815}
2816
2817static int referenced_filters(struct dyn_ftrace *rec)
2818{
2819        struct ftrace_ops *ops;
2820        int cnt = 0;
2821
2822        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2823                if (ops_references_rec(ops, rec))
2824                        cnt++;
2825        }
2826
2827        return cnt;
2828}
2829
2830static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2831{
2832        struct ftrace_page *pg;
2833        struct dyn_ftrace *p;
2834        cycle_t start, stop;
2835        unsigned long update_cnt = 0;
2836        unsigned long ref = 0;
2837        bool test = false;
2838        int i;
2839
2840        /*
2841         * When adding a module, we need to check if tracers are
2842         * currently enabled and if they are set to trace all functions.
2843         * If they are, we need to enable the module functions as well
2844         * as update the reference counts for those function records.
2845         */
2846        if (mod) {
2847                struct ftrace_ops *ops;
2848
2849                for (ops = ftrace_ops_list;
2850                     ops != &ftrace_list_end; ops = ops->next) {
2851                        if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2852                                if (ops_traces_mod(ops))
2853                                        ref++;
2854                                else
2855                                        test = true;
2856                        }
2857                }
2858        }
2859
2860        start = ftrace_now(raw_smp_processor_id());
2861
2862        for (pg = new_pgs; pg; pg = pg->next) {
2863
2864                for (i = 0; i < pg->index; i++) {
2865                        int cnt = ref;
2866
2867                        /* If something went wrong, bail without enabling anything */
2868                        if (unlikely(ftrace_disabled))
2869                                return -1;
2870
2871                        p = &pg->records[i];
2872                        if (test)
2873                                cnt += referenced_filters(p);
2874                        p->flags = cnt;
2875
2876                        /*
2877                         * Do the initial record conversion from mcount jump
2878                         * to the NOP instructions.
2879                         */
2880                        if (!ftrace_code_disable(mod, p))
2881                                break;
2882
2883                        update_cnt++;
2884
2885                        /*
2886                         * If the tracing is enabled, go ahead and enable the record.
2887                         *
2888                         * The reason not to enable the record immediately is the
2889                         * inherent check of ftrace_make_nop/ftrace_make_call for
2890                         * correct previous instructions.  Making first the NOP
2891                         * conversion puts the module to the correct state, thus
2892                         * passing the ftrace_make_call check.
2893                         */
2894                        if (ftrace_start_up && cnt) {
2895                                int failed = __ftrace_replace_code(p, 1);
2896                                if (failed)
2897                                        ftrace_bug(failed, p);
2898                        }
2899                }
2900        }
2901
2902        stop = ftrace_now(raw_smp_processor_id());
2903        ftrace_update_time = stop - start;
2904        ftrace_update_tot_cnt += update_cnt;
2905
2906        return 0;
2907}
2908
2909static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2910{
2911        int order;
2912        int cnt;
2913
2914        if (WARN_ON(!count))
2915                return -EINVAL;
2916
2917        order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2918
2919        /*
2920         * We want to fill as much as possible. No more than a page
2921         * may be empty.
2922         */
2923        while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2924                order--;
2925
2926 again:
2927        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2928
2929        if (!pg->records) {
2930                /* if we can't allocate this size, try something smaller */
2931                if (!order)
2932                        return -ENOMEM;
2933                order >>= 1;
2934                goto again;
2935        }
2936
2937        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2938        pg->size = cnt;
2939
2940        if (cnt > count)
2941                cnt = count;
2942
2943        return cnt;
2944}
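
/*
 * A worked example of the sizing above, assuming a 4K PAGE_SIZE and
 * (purely for illustration) an ENTRY_SIZE of 32 bytes, i.e.
 * ENTRIES_PER_PAGE == 128:
 *
 *   count = 1000 -> DIV_ROUND_UP(1000, 128) = 8 pages
 *                -> order = get_count_order(8) = 3 (one 32K block)
 *
 * The while loop then checks (PAGE_SIZE << 3) / 32 = 1024, which is not
 * >= 1000 + 128, so order 3 stands; pg->size becomes 1024 and the
 * returned cnt is clamped to the requested 1000.
 */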
2945
2946static struct ftrace_page *
2947ftrace_allocate_pages(unsigned long num_to_init)
2948{
2949        struct ftrace_page *start_pg;
2950        struct ftrace_page *pg;
2951        int order;
2952        int cnt;
2953
2954        if (!num_to_init)
2955                return NULL;
2956
2957        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2958        if (!pg)
2959                return NULL;
2960
2961        /*
2962         * Try to allocate as much as possible in one contiguous
2963         * location that fills in all of the space. We want to
2964         * waste as little space as possible.
2965         */
2966        for (;;) {
2967                cnt = ftrace_allocate_records(pg, num_to_init);
2968                if (cnt < 0)
2969                        goto free_pages;
2970
2971                num_to_init -= cnt;
2972                if (!num_to_init)
2973                        break;
2974
2975                pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2976                if (!pg->next)
2977                        goto free_pages;
2978
2979                pg = pg->next;
2980        }
2981
2982        return start_pg;
2983
2984 free_pages:
2985        pg = start_pg;
2986        while (pg) {
2987                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2988                free_pages((unsigned long)pg->records, order);
2989                start_pg = pg->next;
2990                kfree(pg);
2991                pg = start_pg;
2992        }
2993        pr_info("ftrace: FAILED to allocate memory for functions\n");
2994        return NULL;
2995}
2996
2997#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2998
2999struct ftrace_iterator {
3000        loff_t                          pos;
3001        loff_t                          func_pos;
3002        struct ftrace_page              *pg;
3003        struct dyn_ftrace               *func;
3004        struct ftrace_func_probe        *probe;
3005        struct trace_parser             parser;
3006        struct ftrace_hash              *hash;
3007        struct ftrace_ops               *ops;
3008        int                             hidx;
3009        int                             idx;
3010        unsigned                        flags;
3011};
3012
3013static void *
3014t_hash_next(struct seq_file *m, loff_t *pos)
3015{
3016        struct ftrace_iterator *iter = m->private;
3017        struct hlist_node *hnd = NULL;
3018        struct hlist_head *hhd;
3019
3020        (*pos)++;
3021        iter->pos = *pos;
3022
3023        if (iter->probe)
3024                hnd = &iter->probe->node;
3025 retry:
3026        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
3027                return NULL;
3028
3029        hhd = &ftrace_func_hash[iter->hidx];
3030
3031        if (hlist_empty(hhd)) {
3032                iter->hidx++;
3033                hnd = NULL;
3034                goto retry;
3035        }
3036
3037        if (!hnd)
3038                hnd = hhd->first;
3039        else {
3040                hnd = hnd->next;
3041                if (!hnd) {
3042                        iter->hidx++;
3043                        goto retry;
3044                }
3045        }
3046
3047        if (WARN_ON_ONCE(!hnd))
3048                return NULL;
3049
3050        iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
3051
3052        return iter;
3053}
3054
3055static void *t_hash_start(struct seq_file *m, loff_t *pos)
3056{
3057        struct ftrace_iterator *iter = m->private;
3058        void *p = NULL;
3059        loff_t l;
3060
3061        if (!(iter->flags & FTRACE_ITER_DO_HASH))
3062                return NULL;
3063
3064        if (iter->func_pos > *pos)
3065                return NULL;
3066
3067        iter->hidx = 0;
3068        for (l = 0; l <= (*pos - iter->func_pos); ) {
3069                p = t_hash_next(m, &l);
3070                if (!p)
3071                        break;
3072        }
3073        if (!p)
3074                return NULL;
3075
3076        /* Only set this if we have an item */
3077        iter->flags |= FTRACE_ITER_HASH;
3078
3079        return iter;
3080}
3081
3082static int
3083t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
3084{
3085        struct ftrace_func_probe *rec;
3086
3087        rec = iter->probe;
3088        if (WARN_ON_ONCE(!rec))
3089                return -EIO;
3090
3091        if (rec->ops->print)
3092                return rec->ops->print(m, rec->ip, rec->ops, rec->data);
3093
3094        seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
3095
3096        if (rec->data)
3097                seq_printf(m, ":%p", rec->data);
3098        seq_putc(m, '\n');
3099
3100        return 0;
3101}
3102
3103static void *
3104t_next(struct seq_file *m, void *v, loff_t *pos)
3105{
3106        struct ftrace_iterator *iter = m->private;
3107        struct ftrace_ops *ops = iter->ops;
3108        struct dyn_ftrace *rec = NULL;
3109
3110        if (unlikely(ftrace_disabled))
3111                return NULL;
3112
3113        if (iter->flags & FTRACE_ITER_HASH)
3114                return t_hash_next(m, pos);
3115
3116        (*pos)++;
3117        iter->pos = iter->func_pos = *pos;
3118
3119        if (iter->flags & FTRACE_ITER_PRINTALL)
3120                return t_hash_start(m, pos);
3121
3122 retry:
3123        if (iter->idx >= iter->pg->index) {
3124                if (iter->pg->next) {
3125                        iter->pg = iter->pg->next;
3126                        iter->idx = 0;
3127                        goto retry;
3128                }
3129        } else {
3130                rec = &iter->pg->records[iter->idx++];
3131                if (((iter->flags & FTRACE_ITER_FILTER) &&
3132                     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
3133
3134                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
3135                     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
3136
3137                    ((iter->flags & FTRACE_ITER_ENABLED) &&
3138                     !(rec->flags & FTRACE_FL_ENABLED))) {
3139
3140                        rec = NULL;
3141                        goto retry;
3142                }
3143        }
3144
3145        if (!rec)
3146                return t_hash_start(m, pos);
3147
3148        iter->func = rec;
3149
3150        return iter;
3151}
3152
3153static void reset_iter_read(struct ftrace_iterator *iter)
3154{
3155        iter->pos = 0;
3156        iter->func_pos = 0;
3157        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
3158}
3159
3160static void *t_start(struct seq_file *m, loff_t *pos)
3161{
3162        struct ftrace_iterator *iter = m->private;
3163        struct ftrace_ops *ops = iter->ops;
3164        void *p = NULL;
3165        loff_t l;
3166
3167        mutex_lock(&ftrace_lock);
3168
3169        if (unlikely(ftrace_disabled))
3170                return NULL;
3171
3172        /*
3173         * If an lseek was done, then reset and start from the beginning.
3174         */
3175        if (*pos < iter->pos)
3176                reset_iter_read(iter);
3177
3178        /*
3179         * For set_ftrace_filter reading, if we have the filter
3180         * off, we can short cut and just print out that all
3181         * functions are enabled.
3182         */
3183        if ((iter->flags & FTRACE_ITER_FILTER &&
3184             ftrace_hash_empty(ops->func_hash->filter_hash)) ||
3185            (iter->flags & FTRACE_ITER_NOTRACE &&
3186             ftrace_hash_empty(ops->func_hash->notrace_hash))) {
3187                if (*pos > 0)
3188                        return t_hash_start(m, pos);
3189                iter->flags |= FTRACE_ITER_PRINTALL;
3190                /* reset in case of seek/pread */
3191                iter->flags &= ~FTRACE_ITER_HASH;
3192                return iter;
3193        }
3194
3195        if (iter->flags & FTRACE_ITER_HASH)
3196                return t_hash_start(m, pos);
3197
3198        /*
3199         * Unfortunately, we need to restart at ftrace_pages_start
3200         * every time we let go of the ftrace_lock. This is because
3201         * those pointers can change without the lock.
3202         */
3203        iter->pg = ftrace_pages_start;
3204        iter->idx = 0;
3205        for (l = 0; l <= *pos; ) {
3206                p = t_next(m, p, &l);
3207                if (!p)
3208                        break;
3209        }
3210
3211        if (!p)
3212                return t_hash_start(m, pos);
3213
3214        return iter;
3215}
3216
3217static void t_stop(struct seq_file *m, void *p)
3218{
3219        mutex_unlock(&ftrace_lock);
3220}
3221
3222void * __weak
3223arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3224{
3225        return NULL;
3226}
3227
3228static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3229                                struct dyn_ftrace *rec)
3230{
3231        void *ptr;
3232
3233        ptr = arch_ftrace_trampoline_func(ops, rec);
3234        if (ptr)
3235                seq_printf(m, " ->%pS", ptr);
3236}
3237
3238static int t_show(struct seq_file *m, void *v)
3239{
3240        struct ftrace_iterator *iter = m->private;
3241        struct dyn_ftrace *rec;
3242
3243        if (iter->flags & FTRACE_ITER_HASH)
3244                return t_hash_show(m, iter);
3245
3246        if (iter->flags & FTRACE_ITER_PRINTALL) {
3247                if (iter->flags & FTRACE_ITER_NOTRACE)
3248                        seq_puts(m, "#### no functions disabled ####\n");
3249                else
3250                        seq_puts(m, "#### all functions enabled ####\n");
3251                return 0;
3252        }
3253
3254        rec = iter->func;
3255
3256        if (!rec)
3257                return 0;
3258
3259        seq_printf(m, "%ps", (void *)rec->ip);
3260        if (iter->flags & FTRACE_ITER_ENABLED) {
3261                struct ftrace_ops *ops = NULL;
3262
3263                seq_printf(m, " (%ld)%s%s",
3264                           ftrace_rec_count(rec),
3265                           rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3266                           rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3267                if (rec->flags & FTRACE_FL_TRAMP_EN) {
3268                        ops = ftrace_find_tramp_ops_any(rec);
3269                        if (ops)
3270                                seq_printf(m, "\ttramp: %pS",
3271                                           (void *)ops->trampoline);
3272                        else
3273                                seq_puts(m, "\ttramp: ERROR!");
3274
3275                }
3276                add_trampoline_func(m, ops, rec);
3277        }
3278
3279        seq_putc(m, '\n');
3280
3281        return 0;
3282}
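
/*
 * For the enabled_functions file this produces lines like the
 * following (illustrative output):
 *
 *	schedule (1) R I	tramp: <trampoline symbol>
 *
 * i.e. the symbol, its ref count, the save-regs and ipmodify markers,
 * and the trampoline currently in use, if any.
 */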
3283
3284static const struct seq_operations show_ftrace_seq_ops = {
3285        .start = t_start,
3286        .next = t_next,
3287        .stop = t_stop,
3288        .show = t_show,
3289};
3290
3291static int
3292ftrace_avail_open(struct inode *inode, struct file *file)
3293{
3294        struct ftrace_iterator *iter;
3295
3296        if (unlikely(ftrace_disabled))
3297                return -ENODEV;
3298
3299        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3300        if (iter) {
3301                iter->pg = ftrace_pages_start;
3302                iter->ops = &global_ops;
3303        }
3304
3305        return iter ? 0 : -ENOMEM;
3306}
3307
3308static int
3309ftrace_enabled_open(struct inode *inode, struct file *file)
3310{
3311        struct ftrace_iterator *iter;
3312
3313        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3314        if (iter) {
3315                iter->pg = ftrace_pages_start;
3316                iter->flags = FTRACE_ITER_ENABLED;
3317                iter->ops = &global_ops;
3318        }
3319
3320        return iter ? 0 : -ENOMEM;
3321}
3322
3323/**
3324 * ftrace_regex_open - initialize function tracer filter files
3325 * @ops: The ftrace_ops that hold the hash filters
3326 * @flag: The type of filter to process
3327 * @inode: The inode, usually passed in to your open routine
3328 * @file: The file, usually passed in to your open routine
3329 *
3330 * ftrace_regex_open() initializes the filter files for the
3331 * @ops. Depending on @flag it may process the filter hash or
3332 * the notrace hash of @ops. When this is called from the open
3333 * routine, you can use ftrace_filter_write() for the write
3334 * routine if @flag has FTRACE_ITER_FILTER set, or
3335 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3336 * tracing_lseek() should be used as the lseek routine, and
3337 * release must call ftrace_regex_release().
3338 */
3339int
3340ftrace_regex_open(struct ftrace_ops *ops, int flag,
3341                  struct inode *inode, struct file *file)
3342{
3343        struct ftrace_iterator *iter;
3344        struct ftrace_hash *hash;
3345        int ret = 0;
3346
3347        ftrace_ops_init(ops);
3348
3349        if (unlikely(ftrace_disabled))
3350                return -ENODEV;
3351
3352        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3353        if (!iter)
3354                return -ENOMEM;
3355
3356        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3357                kfree(iter);
3358                return -ENOMEM;
3359        }
3360
3361        iter->ops = ops;
3362        iter->flags = flag;
3363
3364        mutex_lock(&ops->func_hash->regex_lock);
3365
3366        if (flag & FTRACE_ITER_NOTRACE)
3367                hash = ops->func_hash->notrace_hash;
3368        else
3369                hash = ops->func_hash->filter_hash;
3370
3371        if (file->f_mode & FMODE_WRITE) {
3372                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3373
3374                if (file->f_flags & O_TRUNC)
3375                        iter->hash = alloc_ftrace_hash(size_bits);
3376                else
3377                        iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3378
3379                if (!iter->hash) {
3380                        trace_parser_put(&iter->parser);
3381                        kfree(iter);
3382                        ret = -ENOMEM;
3383                        goto out_unlock;
3384                }
3385        }
3386
3387        if (file->f_mode & FMODE_READ) {
3388                iter->pg = ftrace_pages_start;
3389
3390                ret = seq_open(file, &show_ftrace_seq_ops);
3391                if (!ret) {
3392                        struct seq_file *m = file->private_data;
3393                        m->private = iter;
3394                } else {
3395                        /* Failed */
3396                        free_ftrace_hash(iter->hash);
3397                        trace_parser_put(&iter->parser);
3398                        kfree(iter);
3399                }
3400        } else
3401                file->private_data = iter;
3402
3403 out_unlock:
3404        mutex_unlock(&ops->func_hash->regex_lock);
3405
3406        return ret;
3407}
3408
3409static int
3410ftrace_filter_open(struct inode *inode, struct file *file)
3411{
3412        struct ftrace_ops *ops = inode->i_private;
3413
3414        return ftrace_regex_open(ops,
3415                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3416                        inode, file);
3417}
3418
3419static int
3420ftrace_notrace_open(struct inode *inode, struct file *file)
3421{
3422        struct ftrace_ops *ops = inode->i_private;
3423
3424        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3425                                 inode, file);
3426}
3427
3428 /* Holds a glob expression, as parsed by filter_parse_regex, for quick matching */
3429struct ftrace_glob {
3430        char *search;
3431        unsigned len;
3432        int type;
3433};
3434
3435static int ftrace_match(char *str, struct ftrace_glob *g)
3436{
3437        int matched = 0;
3438        int slen;
3439
3440        switch (g->type) {
3441        case MATCH_FULL:
3442                if (strcmp(str, g->search) == 0)
3443                        matched = 1;
3444                break;
3445        case MATCH_FRONT_ONLY:
3446                if (strncmp(str, g->search, g->len) == 0)
3447                        matched = 1;
3448                break;
3449        case MATCH_MIDDLE_ONLY:
3450                if (strstr(str, g->search))
3451                        matched = 1;
3452                break;
3453        case MATCH_END_ONLY:
3454                slen = strlen(str);
3455                if (slen >= g->len &&
3456                    memcmp(str + slen - g->len, g->search, g->len) == 0)
3457                        matched = 1;
3458                break;
3459        }
3460
3461        return matched;
3462}
3463
3464static int
3465enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3466{
3467        struct ftrace_func_entry *entry;
3468        int ret = 0;
3469
3470        entry = ftrace_lookup_ip(hash, rec->ip);
3471        if (clear_filter) {
3472                /* Do nothing if it doesn't exist */
3473                if (!entry)
3474                        return 0;
3475
3476                free_hash_entry(hash, entry);
3477        } else {
3478                /* Do nothing if it exists */
3479                if (entry)
3480                        return 0;
3481
3482                ret = add_hash_entry(hash, rec->ip);
3483        }
3484        return ret;
3485}
3486
3487static int
3488ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3489                struct ftrace_glob *mod_g, int exclude_mod)
3490{
3491        char str[KSYM_SYMBOL_LEN];
3492        char *modname;
3493
3494        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3495
3496        if (mod_g) {
3497                int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3498
3499                /* blank module name to match all modules */
3500                if (!mod_g->len) {
3501                        /* blank module globbing: modname xor exclude_mod */
3502                        if ((!exclude_mod) != (!modname))
3503                                goto func_match;
3504                        return 0;
3505                }
3506
3507                /* not matching the module */
3508                if (!modname || !mod_matches) {
3509                        if (exclude_mod)
3510                                goto func_match;
3511                        else
3512                                return 0;
3513                }
3514
3515                if (mod_matches && exclude_mod)
3516                        return 0;
3517
3518func_match:
3519                /* blank search means to match all funcs in the mod */
3520                if (!func_g->len)
3521                        return 1;
3522        }
3523
3524        return ftrace_match(str, func_g);
3525}
3526
3527static int
3528match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3529{
3530        struct ftrace_page *pg;
3531        struct dyn_ftrace *rec;
3532        struct ftrace_glob func_g = { .type = MATCH_FULL };
3533        struct ftrace_glob mod_g = { .type = MATCH_FULL };
3534        struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3535        int exclude_mod = 0;
3536        int found = 0;
3537        int ret;
3538        int clear_filter;
3539
3540        if (func) {
3541                func_g.type = filter_parse_regex(func, len, &func_g.search,
3542                                                 &clear_filter);
3543                func_g.len = strlen(func_g.search);
3544        }
3545
3546        if (mod) {
3547                mod_g.type = filter_parse_regex(mod, strlen(mod),
3548                                &mod_g.search, &exclude_mod);
3549                mod_g.len = strlen(mod_g.search);
3550        }
3551
3552        mutex_lock(&ftrace_lock);
3553
3554        if (unlikely(ftrace_disabled))
3555                goto out_unlock;
3556
3557        do_for_each_ftrace_rec(pg, rec) {
3558                if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3559                        ret = enter_record(hash, rec, clear_filter);
3560                        if (ret < 0) {
3561                                found = ret;
3562                                goto out_unlock;
3563                        }
3564                        found = 1;
3565                }
3566        } while_for_each_ftrace_rec();
3567 out_unlock:
3568        mutex_unlock(&ftrace_lock);
3569
3570        return found;
3571}
3572
3573static int
3574ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3575{
3576        return match_records(hash, buff, len, NULL);
3577}
3578
3579
3580/*
3581 * We register the module command as a template to show others how
3582 * to register a command as well.
3583 */
3584
3585static int
3586ftrace_mod_callback(struct ftrace_hash *hash,
3587                    char *func, char *cmd, char *module, int enable)
3588{
3589        int ret;
3590
3591        /*
3592         * cmd == 'mod' because we only registered this func
3593         * for the 'mod' ftrace_func_command.
3594         * But if you register one func with multiple commands,
3595         * you can tell which command was used by the cmd
3596         * parameter.
3597         */
3598        ret = match_records(hash, func, strlen(func), module);
3599        if (!ret)
3600                return -EINVAL;
3601        if (ret < 0)
3602                return ret;
3603        return 0;
3604}
3605
3606static struct ftrace_func_command ftrace_mod_cmd = {
3607        .name                   = "mod",
3608        .func                   = ftrace_mod_callback,
3609};
3610
3611static int __init ftrace_mod_cmd_init(void)
3612{
3613        return register_ftrace_command(&ftrace_mod_cmd);
3614}
3615core_initcall(ftrace_mod_cmd_init);
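
/*
 * A minimal sketch of registering another command the same way (the
 * "dump" name and callback body are illustrative only):
 *
 *      static int ftrace_dump_callback(struct ftrace_hash *hash,
 *                      char *func, char *cmd, char *param, int enable)
 *      {
 *              ... act on each function matching func ...
 *              return 0;
 *      }
 *
 *      static struct ftrace_func_command ftrace_dump_cmd = {
 *              .name   = "dump",
 *              .func   = ftrace_dump_callback,
 *      };
 *
 * followed by register_ftrace_command(&ftrace_dump_cmd) from an
 * __init function. Writing "schedule:dump" to set_ftrace_filter
 * would then invoke the callback.
 */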
3616
3617static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3618                                      struct ftrace_ops *op, struct pt_regs *pt_regs)
3619{
3620        struct ftrace_func_probe *entry;
3621        struct hlist_head *hhd;
3622        unsigned long key;
3623
3624        key = hash_long(ip, FTRACE_HASH_BITS);
3625
3626        hhd = &ftrace_func_hash[key];
3627
3628        if (hlist_empty(hhd))
3629                return;
3630
3631        /*
3632         * Disable preemption for these calls to prevent an RCU grace
3633         * period from completing. This syncs the hash iteration with the
3634         * freeing of items on the hash. rcu_read_lock is too dangerous here.
3635         */
3636        preempt_disable_notrace();
3637        hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3638                if (entry->ip == ip)
3639                        entry->ops->func(ip, parent_ip, &entry->data);
3640        }
3641        preempt_enable_notrace();
3642}
3643
3644static struct ftrace_ops trace_probe_ops __read_mostly =
3645{
3646        .func           = function_trace_probe_call,
3647        .flags          = FTRACE_OPS_FL_INITIALIZED,
3648        INIT_OPS_HASH(trace_probe_ops)
3649};
3650
3651static int ftrace_probe_registered;
3652
3653static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
3654{
3655        int ret;
3656        int i;
3657
3658        if (ftrace_probe_registered) {
3659                /* still need to update the function call sites */
3660                if (ftrace_enabled)
3661                        ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3662                                               old_hash);
3663                return;
3664        }
3665
3666        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3667                struct hlist_head *hhd = &ftrace_func_hash[i];
3668                if (hhd->first)
3669                        break;
3670        }
3671        /* Nothing registered? */
3672        if (i == FTRACE_FUNC_HASHSIZE)
3673                return;
3674
3675        ret = ftrace_startup(&trace_probe_ops, 0);
3676
3677        ftrace_probe_registered = 1;
3678}
3679
3680static bool __disable_ftrace_function_probe(void)
3681{
3682        int i;
3683
3684        if (!ftrace_probe_registered)
3685                return false;
3686
3687        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3688                struct hlist_head *hhd = &ftrace_func_hash[i];
3689                if (hhd->first)
3690                        return false;
3691        }
3692
3693        /* no more funcs left */
3694        ftrace_shutdown(&trace_probe_ops, 0);
3695
3696        ftrace_probe_registered = 0;
3697        return true;
3698}
3699
3700
3701static void ftrace_free_entry(struct ftrace_func_probe *entry)
3702{
3703        if (entry->ops->free)
3704                entry->ops->free(entry->ops, entry->ip, &entry->data);
3705        kfree(entry);
3706}
3707
3708int
3709register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3710                              void *data)
3711{
3712        struct ftrace_ops_hash old_hash_ops;
3713        struct ftrace_func_probe *entry;
3714        struct ftrace_glob func_g;
3715        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3716        struct ftrace_hash *old_hash = *orig_hash;
3717        struct ftrace_hash *hash;
3718        struct ftrace_page *pg;
3719        struct dyn_ftrace *rec;
3720        int not;
3721        unsigned long key;
3722        int count = 0;
3723        int ret;
3724
3725        func_g.type = filter_parse_regex(glob, strlen(glob),
3726                        &func_g.search, &not);
3727        func_g.len = strlen(func_g.search);
3728
3729        /* we do not support '!' for function probes */
3730        if (WARN_ON(not))
3731                return -EINVAL;
3732
3733        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3734
3735        old_hash_ops.filter_hash = old_hash;
3736        /* Probes only have filters */
3737        old_hash_ops.notrace_hash = NULL;
3738
3739        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3740        if (!hash) {
3741                count = -ENOMEM;
3742                goto out;
3743        }
3744
3745        if (unlikely(ftrace_disabled)) {
3746                count = -ENODEV;
3747                goto out;
3748        }
3749
3750        mutex_lock(&ftrace_lock);
3751
3752        do_for_each_ftrace_rec(pg, rec) {
3753
3754                if (!ftrace_match_record(rec, &func_g, NULL, 0))
3755                        continue;
3756
3757                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3758                if (!entry) {
3759                        /* If we did not process any, then return error */
3760                        if (!count)
3761                                count = -ENOMEM;
3762                        goto out_unlock;
3763                }
3764
3765                count++;
3766
3767                entry->data = data;
3768
3769                /*
3770                 * The caller might want to do something special
3771                 * for each function we find. We call the callback
3772                 * to give the caller an opportunity to do so.
3773                 */
3774                if (ops->init) {
3775                        if (ops->init(ops, rec->ip, &entry->data) < 0) {
3776                                /* caller does not like this func */
3777                                kfree(entry);
3778                                continue;
3779                        }
3780                }
3781
3782                ret = enter_record(hash, rec, 0);
3783                if (ret < 0) {
3784                        kfree(entry);
3785                        count = ret;
3786                        goto out_unlock;
3787                }
3788
3789                entry->ops = ops;
3790                entry->ip = rec->ip;
3791
3792                key = hash_long(entry->ip, FTRACE_HASH_BITS);
3793                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3794
3795        } while_for_each_ftrace_rec();
3796
3797        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3798
3799        __enable_ftrace_function_probe(&old_hash_ops);
3800
3801        if (!ret)
3802                free_ftrace_hash_rcu(old_hash);
3803        else
3804                count = ret;
3805
3806 out_unlock:
3807        mutex_unlock(&ftrace_lock);
3808 out:
3809        mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3810        free_ftrace_hash(hash);
3811
3812        return count;
3813}
3814
3815enum {
3816        PROBE_TEST_FUNC         = 1,
3817        PROBE_TEST_DATA         = 2
3818};
3819
3820static void
3821__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3822                                  void *data, int flags)
3823{
3824        struct ftrace_ops_hash old_hash_ops;
3825        struct ftrace_func_entry *rec_entry;
3826        struct ftrace_func_probe *entry;
3827        struct ftrace_func_probe *p;
3828        struct ftrace_glob func_g;
3829        struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3830        struct ftrace_hash *old_hash = *orig_hash;
3831        struct list_head free_list;
3832        struct ftrace_hash *hash;
3833        struct hlist_node *tmp;
3834        char str[KSYM_SYMBOL_LEN];
3835        int i, ret;
3836        bool disabled;
3837
3838        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3839                func_g.search = NULL;
3840        else if (glob) {
3841                int not;
3842
3843                func_g.type = filter_parse_regex(glob, strlen(glob),
3844                                                 &func_g.search, &not);
3845                func_g.len = strlen(func_g.search);
3846                func_g.search = glob;
3847
3848                /* we do not support '!' for function probes */
3849                if (WARN_ON(not))
3850                        return;
3851        }
3852
3853        mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3854
3855        old_hash_ops.filter_hash = old_hash;
3856        /* Probes only have filters */
3857        old_hash_ops.notrace_hash = NULL;
3858
3859        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3860        if (!hash)
3861                /* Hmm, should report this somehow */
3862                goto out_unlock;
3863
3864        INIT_LIST_HEAD(&free_list);
3865
3866        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3867                struct hlist_head *hhd = &ftrace_func_hash[i];
3868
3869                hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3870
3871                        /* break up if statements for readability */
3872                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3873                                continue;
3874
3875                        if ((flags & PROBE_TEST_DATA) && entry->data != data)
3876                                continue;
3877
3878                        /* do this last, since it is the most expensive */
3879                        if (func_g.search) {
3880                                kallsyms_lookup(entry->ip, NULL, NULL,
3881                                                NULL, str);
3882                                if (!ftrace_match(str, &func_g))
3883                                        continue;
3884                        }
3885
3886                        rec_entry = ftrace_lookup_ip(hash, entry->ip);
3887                        /* It is possible more than one entry had this ip */
3888                        if (rec_entry)
3889                                free_hash_entry(hash, rec_entry);
3890
3891                        hlist_del_rcu(&entry->node);
3892                        list_add(&entry->free_list, &free_list);
3893                }
3894        }
3895        mutex_lock(&ftrace_lock);
3896        disabled = __disable_ftrace_function_probe();
3897        /*
3898         * Remove after the disable is called. Otherwise, if the last
3899         * probe is removed, an empty hash means *all enabled*.
3900         */
3901        ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3902
3903        /* still need to update the function call sites */
3904        if (ftrace_enabled && !disabled)
3905                ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3906                                       &old_hash_ops);
3907        synchronize_sched();
3908        if (!ret)
3909                free_ftrace_hash_rcu(old_hash);
3910
3911        list_for_each_entry_safe(entry, p, &free_list, free_list) {
3912                list_del(&entry->free_list);
3913                ftrace_free_entry(entry);
3914        }
3915        mutex_unlock(&ftrace_lock);
3916
3917 out_unlock:
3918        mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3919        free_ftrace_hash(hash);
3920}
3921
3922void
3923unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3924                                void *data)
3925{
3926        __unregister_ftrace_function_probe(glob, ops, data,
3927                                          PROBE_TEST_FUNC | PROBE_TEST_DATA);
3928}
3929
3930void
3931unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3932{
3933        __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3934}
3935
3936void unregister_ftrace_function_probe_all(char *glob)
3937{
3938        __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3939}
3940
3941static LIST_HEAD(ftrace_commands);
3942static DEFINE_MUTEX(ftrace_cmd_mutex);
3943
3944/*
3945 * Currently we only register ftrace commands from __init, so mark this
3946 * __init too.
3947 */
3948__init int register_ftrace_command(struct ftrace_func_command *cmd)
3949{
3950        struct ftrace_func_command *p;
3951        int ret = 0;
3952
3953        mutex_lock(&ftrace_cmd_mutex);
3954        list_for_each_entry(p, &ftrace_commands, list) {
3955                if (strcmp(cmd->name, p->name) == 0) {
3956                        ret = -EBUSY;
3957                        goto out_unlock;
3958                }
3959        }
3960        list_add(&cmd->list, &ftrace_commands);
3961 out_unlock:
3962        mutex_unlock(&ftrace_cmd_mutex);
3963
3964        return ret;
3965}
3966
3967/*
3968 * Currently we only unregister ftrace commands from __init, so mark
3969 * this __init too.
3970 */
3971__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3972{
3973        struct ftrace_func_command *p, *n;
3974        int ret = -ENODEV;
3975
3976        mutex_lock(&ftrace_cmd_mutex);
3977        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3978                if (strcmp(cmd->name, p->name) == 0) {
3979                        ret = 0;
3980                        list_del_init(&p->list);
3981                        goto out_unlock;
3982                }
3983        }
3984 out_unlock:
3985        mutex_unlock(&ftrace_cmd_mutex);
3986
3987        return ret;
3988}
3989
3990static int ftrace_process_regex(struct ftrace_hash *hash,
3991                                char *buff, int len, int enable)
3992{
3993        char *func, *command, *next = buff;
3994        struct ftrace_func_command *p;
3995        int ret = -EINVAL;
3996
3997        func = strsep(&next, ":");
3998
3999        if (!next) {
4000                ret = ftrace_match_records(hash, func, len);
4001                if (!ret)
4002                        ret = -EINVAL;
4003                if (ret < 0)
4004                        return ret;
4005                return 0;
4006        }
4007
4008        /* command found */
4009
4010        command = strsep(&next, ":");
4011
4012        mutex_lock(&ftrace_cmd_mutex);
4013        list_for_each_entry(p, &ftrace_commands, list) {
4014                if (strcmp(p->name, command) == 0) {
4015                        ret = p->func(hash, func, command, next, enable);
4016                        goto out_unlock;
4017                }
4018        }
4019 out_unlock:
4020        mutex_unlock(&ftrace_cmd_mutex);
4021
4022        return ret;
4023}
4024
4025static ssize_t
4026ftrace_regex_write(struct file *file, const char __user *ubuf,
4027                   size_t cnt, loff_t *ppos, int enable)
4028{
4029        struct ftrace_iterator *iter;
4030        struct trace_parser *parser;
4031        ssize_t ret, read;
4032
4033        if (!cnt)
4034                return 0;
4035
4036        if (file->f_mode & FMODE_READ) {
4037                struct seq_file *m = file->private_data;
4038                iter = m->private;
4039        } else
4040                iter = file->private_data;
4041
4042        if (unlikely(ftrace_disabled))
4043                return -ENODEV;
4044
4045        /* iter->hash is a local copy, so we don't need regex_lock */
4046
4047        parser = &iter->parser;
4048        read = trace_get_user(parser, ubuf, cnt, ppos);
4049
4050        if (read >= 0 && trace_parser_loaded(parser) &&
4051            !trace_parser_cont(parser)) {
4052                ret = ftrace_process_regex(iter->hash, parser->buffer,
4053                                           parser->idx, enable);
4054                trace_parser_clear(parser);
4055                if (ret < 0)
4056                        goto out;
4057        }
4058
4059        ret = read;
4060 out:
4061        return ret;
4062}
4063
4064ssize_t
4065ftrace_filter_write(struct file *file, const char __user *ubuf,
4066                    size_t cnt, loff_t *ppos)
4067{
4068        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4069}
4070
4071ssize_t
4072ftrace_notrace_write(struct file *file, const char __user *ubuf,
4073                     size_t cnt, loff_t *ppos)
4074{
4075        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4076}
4077
4078static int
4079ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4080{
4081        struct ftrace_func_entry *entry;
4082
4083        if (!ftrace_location(ip))
4084                return -EINVAL;
4085
4086        if (remove) {
4087                entry = ftrace_lookup_ip(hash, ip);
4088                if (!entry)
4089                        return -ENOENT;
4090                free_hash_entry(hash, entry);
4091                return 0;
4092        }
4093
4094        return add_hash_entry(hash, ip);
4095}
4096
4097static void ftrace_ops_update_code(struct ftrace_ops *ops,
4098                                   struct ftrace_ops_hash *old_hash)
4099{
4100        struct ftrace_ops *op;
4101
4102        if (!ftrace_enabled)
4103                return;
4104
4105        if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4106                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4107                return;
4108        }
4109
4110        /*
4111         * If this is the shared global_ops filter, then we need to
4112         * check whether another ops that shares it is enabled.
4113         * If so, we still need to run the modify code.
4114         */
4115        if (ops->func_hash != &global_ops.local_hash)
4116                return;
4117
4118        do_for_each_ftrace_op(op, ftrace_ops_list) {
4119                if (op->func_hash == &global_ops.local_hash &&
4120                    op->flags & FTRACE_OPS_FL_ENABLED) {
4121                        ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4122                        /* Only need to do this once */
4123                        return;
4124                }
4125        } while_for_each_ftrace_op(op);
4126}
4127
4128static int
4129ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4130                unsigned long ip, int remove, int reset, int enable)
4131{
4132        struct ftrace_hash **orig_hash;
4133        struct ftrace_ops_hash old_hash_ops;
4134        struct ftrace_hash *old_hash;
4135        struct ftrace_hash *hash;
4136        int ret;
4137
4138        if (unlikely(ftrace_disabled))
4139                return -ENODEV;
4140
4141        mutex_lock(&ops->func_hash->regex_lock);
4142
4143        if (enable)
4144                orig_hash = &ops->func_hash->filter_hash;
4145        else
4146                orig_hash = &ops->func_hash->notrace_hash;
4147
4148        if (reset)
4149                hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4150        else
4151                hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4152
4153        if (!hash) {
4154                ret = -ENOMEM;
4155                goto out_regex_unlock;
4156        }
4157
4158        if (buf && !ftrace_match_records(hash, buf, len)) {
4159                ret = -EINVAL;
4160                goto out_regex_unlock;
4161        }
4162        if (ip) {
4163                ret = ftrace_match_addr(hash, ip, remove);
4164                if (ret < 0)
4165                        goto out_regex_unlock;
4166        }
4167
4168        mutex_lock(&ftrace_lock);
4169        old_hash = *orig_hash;
4170        old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4171        old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4172        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4173        if (!ret) {
4174                ftrace_ops_update_code(ops, &old_hash_ops);
4175                free_ftrace_hash_rcu(old_hash);
4176        }
4177        mutex_unlock(&ftrace_lock);
4178
4179 out_regex_unlock:
4180        mutex_unlock(&ops->func_hash->regex_lock);
4181
4182        free_ftrace_hash(hash);
4183        return ret;
4184}
4185
4186static int
4187ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4188                int reset, int enable)
4189{
4190        return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4191}
4192
4193/**
4194 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4195 * @ops: the ops to set the filter with
4196 * @ip: the address to add to or remove from the filter.
4197 * @remove: non zero to remove the ip from the filter
4198 * @reset: non zero to reset all filters before applying this filter.
4199 *
4200 * Filters denote which functions should be enabled when tracing is enabled.
4201 * If @ip is NULL, it fails to update the filter.
4202 */
4203int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4204                         int remove, int reset)
4205{
4206        ftrace_ops_init(ops);
4207        return ftrace_set_addr(ops, ip, remove, reset, 1);
4208}
4209EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
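
/*
 * A minimal usage sketch (my_ops, my_callback and target_func are
 * illustrative names):
 *
 *      static struct ftrace_ops my_ops = {
 *              .func = my_callback,
 *      };
 *
 *      ret = ftrace_set_filter_ip(&my_ops, (unsigned long)target_func,
 *                                 0, 0);
 *      if (!ret)
 *              ret = register_ftrace_function(&my_ops);
 */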
4210
4211static int
4212ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4213                 int reset, int enable)
4214{
4215        return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4216}
4217
4218/**
4219 * ftrace_set_filter - set a function to filter on in ftrace
4220 * @ops: the ops to set the filter with
4221 * @buf: the string that holds the function filter text.
4222 * @len: the length of the string.
4223 * @reset: non zero to reset all filters before applying this filter.
4224 *
4225 * Filters denote which functions should be enabled when tracing is enabled.
4226 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4227 */
4228int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4229                       int len, int reset)
4230{
4231        ftrace_ops_init(ops);
4232        return ftrace_set_regex(ops, buf, len, reset, 1);
4233}
4234EXPORT_SYMBOL_GPL(ftrace_set_filter);
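
/*
 * For instance, to have my_ops trace only the tcp functions
 * (a sketch; ftrace_set_notrace() below takes the same arguments
 * for the opposite effect):
 *
 *      ftrace_set_filter(&my_ops, "tcp_*", strlen("tcp_*"), 1);
 */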
4235
4236/**
4237 * ftrace_set_notrace - set a function to not trace in ftrace
4238 * @ops: the ops to set the notrace filter with
4239 * @buf: the string that holds the function notrace text.
4240 * @len: the length of the string.
4241 * @reset: non zero to reset all filters before applying this filter.
4242 *
4243 * Notrace Filters denote which functions should not be enabled when tracing
4244 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4245 * for tracing.
4246 */
4247int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4248                        int len, int reset)
4249{
4250        ftrace_ops_init(ops);
4251        return ftrace_set_regex(ops, buf, len, reset, 0);
4252}
4253EXPORT_SYMBOL_GPL(ftrace_set_notrace);
4254/**
4255 * ftrace_set_global_filter - set a function to filter on with global tracers
4256 * @buf: the string that holds the function filter text.
4257 * @len: the length of the string.
4258 * @reset: non zero to reset all filters before applying this filter.
4259 *
4260 * Filters denote which functions should be enabled when tracing is enabled.
4261 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4262 */
4263void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4264{
4265        ftrace_set_regex(&global_ops, buf, len, reset, 1);
4266}
4267EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4268
4269/**
4270 * ftrace_set_global_notrace - set a function to not trace with global tracers
4271 * @buf: the string that holds the function notrace text.
4272 * @len: the length of the string.
4273 * @reset: non zero to reset all filters before applying this filter.
4274 *
4275 * Notrace Filters denote which functions should not be enabled when tracing
4276 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4277 * for tracing.
4278 */
4279void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4280{
4281        ftrace_set_regex(&global_ops, buf, len, reset, 0);
4282}
4283EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4284
4285/*
4286 * command line interface to allow users to set filters on boot up.
4287 */
4288#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
4289static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4290static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4291
4292 /* Used by the function selftest to skip the test when a filter is set */
4293bool ftrace_filter_param __initdata;
4294
4295static int __init set_ftrace_notrace(char *str)
4296{
4297        ftrace_filter_param = true;
4298        strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4299        return 1;
4300}
4301__setup("ftrace_notrace=", set_ftrace_notrace);
4302
4303static int __init set_ftrace_filter(char *str)
4304{
4305        ftrace_filter_param = true;
4306        strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4307        return 1;
4308}
4309__setup("ftrace_filter=", set_ftrace_filter);
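
/*
 * For example, booting with (symbols illustrative):
 *
 *      ftrace_filter=kmem_cache_alloc,kfree ftrace_notrace=*spin_lock*
 *
 * seeds the global filter and notrace hashes before tracing starts;
 * the buffers are consumed by set_ftrace_early_filters() below.
 */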
4310
4311#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4312static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4313static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4314static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
4315
4316static unsigned long save_global_trampoline;
4317static unsigned long save_global_flags;
4318
4319static int __init set_graph_function(char *str)
4320{
4321        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4322        return 1;
4323}
4324__setup("ftrace_graph_filter=", set_graph_function);
4325
4326static int __init set_graph_notrace_function(char *str)
4327{
4328        strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4329        return 1;
4330}
4331__setup("ftrace_graph_notrace=", set_graph_notrace_function);
4332
4333static void __init set_ftrace_early_graph(char *buf, int enable)
4334{
4335        int ret;
4336        char *func;
4337        unsigned long *table = ftrace_graph_funcs;
4338        int *count = &ftrace_graph_count;
4339
4340        if (!enable) {
4341                table = ftrace_graph_notrace_funcs;
4342                count = &ftrace_graph_notrace_count;
4343        }
4344
4345        while (buf) {
4346                func = strsep(&buf, ",");
4347                /* we allow only one expression at a time */
4348                ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4349                if (ret)
4350                        printk(KERN_DEBUG "ftrace: function %s not "
4351                                          "traceable\n", func);
4352        }
4353}
4354#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4355
4356void __init
4357ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4358{
4359        char *func;
4360
4361        ftrace_ops_init(ops);
4362
4363        while (buf) {
4364                func = strsep(&buf, ",");
4365                ftrace_set_regex(ops, func, strlen(func), 0, enable);
4366        }
4367}
4368
4369static void __init set_ftrace_early_filters(void)
4370{
4371        if (ftrace_filter_buf[0])
4372                ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4373        if (ftrace_notrace_buf[0])
4374                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4375#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4376        if (ftrace_graph_buf[0])
4377                set_ftrace_early_graph(ftrace_graph_buf, 1);
4378        if (ftrace_graph_notrace_buf[0])
4379                set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4380#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4381}
4382
4383int ftrace_regex_release(struct inode *inode, struct file *file)
4384{
4385        struct seq_file *m = (struct seq_file *)file->private_data;
4386        struct ftrace_ops_hash old_hash_ops;
4387        struct ftrace_iterator *iter;
4388        struct ftrace_hash **orig_hash;
4389        struct ftrace_hash *old_hash;
4390        struct trace_parser *parser;
4391        int filter_hash;
4392        int ret;
4393
4394        if (file->f_mode & FMODE_READ) {
4395                iter = m->private;
4396                seq_release(inode, file);
4397        } else
4398                iter = file->private_data;
4399
4400        parser = &iter->parser;
4401        if (trace_parser_loaded(parser)) {
4402                parser->buffer[parser->idx] = 0;
4403                ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4404        }
4405
4406        trace_parser_put(parser);
4407
4408        mutex_lock(&iter->ops->func_hash->regex_lock);
4409
4410        if (file->f_mode & FMODE_WRITE) {
4411                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4412
4413                if (filter_hash)
4414                        orig_hash = &iter->ops->func_hash->filter_hash;
4415                else
4416                        orig_hash = &iter->ops->func_hash->notrace_hash;
4417
4418                mutex_lock(&ftrace_lock);
4419                old_hash = *orig_hash;
4420                old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
4421                old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
4422                ret = ftrace_hash_move(iter->ops, filter_hash,
4423                                       orig_hash, iter->hash);
4424                if (!ret) {
4425                        ftrace_ops_update_code(iter->ops, &old_hash_ops);
4426                        free_ftrace_hash_rcu(old_hash);
4427                }
4428                mutex_unlock(&ftrace_lock);
4429        }
4430
4431        mutex_unlock(&iter->ops->func_hash->regex_lock);
4432        free_ftrace_hash(iter->hash);
4433        kfree(iter);
4434
4435        return 0;
4436}
4437
4438static const struct file_operations ftrace_avail_fops = {
4439        .open = ftrace_avail_open,
4440        .read = seq_read,
4441        .llseek = seq_lseek,
4442        .release = seq_release_private,
4443};
4444
4445static const struct file_operations ftrace_enabled_fops = {
4446        .open = ftrace_enabled_open,
4447        .read = seq_read,
4448        .llseek = seq_lseek,
4449        .release = seq_release_private,
4450};
4451
4452static const struct file_operations ftrace_filter_fops = {
4453        .open = ftrace_filter_open,
4454        .read = seq_read,
4455        .write = ftrace_filter_write,
4456        .llseek = tracing_lseek,
4457        .release = ftrace_regex_release,
4458};
4459
4460static const struct file_operations ftrace_notrace_fops = {
4461        .open = ftrace_notrace_open,
4462        .read = seq_read,
4463        .write = ftrace_notrace_write,
4464        .llseek = tracing_lseek,
4465        .release = ftrace_regex_release,
4466};
4467
4468#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4469
4470static DEFINE_MUTEX(graph_lock);
4471
4472int ftrace_graph_count;
4473int ftrace_graph_notrace_count;
4474unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4475unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4476
4477struct ftrace_graph_data {
4478        unsigned long *table;
4479        size_t size;
4480        int *count;
4481        const struct seq_operations *seq_ops;
4482};
4483
4484static void *
4485__g_next(struct seq_file *m, loff_t *pos)
4486{
4487        struct ftrace_graph_data *fgd = m->private;
4488
4489        if (*pos >= *fgd->count)
4490                return NULL;
4491        return &fgd->table[*pos];
4492}
4493
4494static void *
4495g_next(struct seq_file *m, void *v, loff_t *pos)
4496{
4497        (*pos)++;
4498        return __g_next(m, pos);
4499}
4500
4501static void *g_start(struct seq_file *m, loff_t *pos)
4502{
4503        struct ftrace_graph_data *fgd = m->private;
4504
4505        mutex_lock(&graph_lock);
4506
4507        /* Nothing set; tell g_show to print that all functions are enabled */
4508        if (!*fgd->count && !*pos)
4509                return (void *)1;
4510
4511        return __g_next(m, pos);
4512}
4513
4514static void g_stop(struct seq_file *m, void *p)
4515{
4516        mutex_unlock(&graph_lock);
4517}
4518
4519static int g_show(struct seq_file *m, void *v)
4520{
4521        unsigned long *ptr = v;
4522
4523        if (!ptr)
4524                return 0;
4525
4526        if (ptr == (unsigned long *)1) {
4527                struct ftrace_graph_data *fgd = m->private;
4528
4529                if (fgd->table == ftrace_graph_funcs)
4530                        seq_puts(m, "#### all functions enabled ####\n");
4531                else
4532                        seq_puts(m, "#### no functions disabled ####\n");
4533                return 0;
4534        }
4535
4536        seq_printf(m, "%ps\n", (void *)*ptr);
4537
4538        return 0;
4539}
4540
4541static const struct seq_operations ftrace_graph_seq_ops = {
4542        .start = g_start,
4543        .next = g_next,
4544        .stop = g_stop,
4545        .show = g_show,
4546};
4547
4548static int
4549__ftrace_graph_open(struct inode *inode, struct file *file,
4550                    struct ftrace_graph_data *fgd)
4551{
4552        int ret = 0;
4553
4554        mutex_lock(&graph_lock);
4555        if ((file->f_mode & FMODE_WRITE) &&
4556            (file->f_flags & O_TRUNC)) {
4557                *fgd->count = 0;
4558                memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4559        }
4560        mutex_unlock(&graph_lock);
4561
4562        if (file->f_mode & FMODE_READ) {
4563                ret = seq_open(file, fgd->seq_ops);
4564                if (!ret) {
4565                        struct seq_file *m = file->private_data;
4566                        m->private = fgd;
4567                }
4568        } else
4569                file->private_data = fgd;
4570
4571        return ret;
4572}
4573
4574static int
4575ftrace_graph_open(struct inode *inode, struct file *file)
4576{
4577        struct ftrace_graph_data *fgd;
4578
4579        if (unlikely(ftrace_disabled))
4580                return -ENODEV;
4581
4582        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4583        if (fgd == NULL)
4584                return -ENOMEM;
4585
4586        fgd->table = ftrace_graph_funcs;
4587        fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4588        fgd->count = &ftrace_graph_count;
4589        fgd->seq_ops = &ftrace_graph_seq_ops;
4590
4591        return __ftrace_graph_open(inode, file, fgd);
4592}
4593
4594static int
4595ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4596{
4597        struct ftrace_graph_data *fgd;
4598
4599        if (unlikely(ftrace_disabled))
4600                return -ENODEV;
4601
4602        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4603        if (fgd == NULL)
4604                return -ENOMEM;
4605
4606        fgd->table = ftrace_graph_notrace_funcs;
4607        fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4608        fgd->count = &ftrace_graph_notrace_count;
4609        fgd->seq_ops = &ftrace_graph_seq_ops;
4610
4611        return __ftrace_graph_open(inode, file, fgd);
4612}
4613
4614static int
4615ftrace_graph_release(struct inode *inode, struct file *file)
4616{
4617        if (file->f_mode & FMODE_READ) {
4618                struct seq_file *m = file->private_data;
4619
4620                kfree(m->private);
4621                seq_release(inode, file);
4622        } else {
4623                kfree(file->private_data);
4624        }
4625
4626        return 0;
4627}
4628
4629static int
4630ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4631{
4632        struct ftrace_glob func_g;
4633        struct dyn_ftrace *rec;
4634        struct ftrace_page *pg;
4635        int fail = 1;
4636        int not;
4637        bool exists;
4638        int i;
4639
4640        /* decode regex */
4641        func_g.type = filter_parse_regex(buffer, strlen(buffer),
4642                                         &func_g.search, &not);
4643        if (!not && *idx >= size)
4644                return -EBUSY;
4645
4646        func_g.len = strlen(func_g.search);
4647
4648        mutex_lock(&ftrace_lock);
4649
4650        if (unlikely(ftrace_disabled)) {
4651                mutex_unlock(&ftrace_lock);
4652                return -ENODEV;
4653        }
4654
4655        do_for_each_ftrace_rec(pg, rec) {
4656
4657                if (ftrace_match_record(rec, &func_g, NULL, 0)) {
4658                        /* if it is in the array */
4659                        exists = false;
4660                        for (i = 0; i < *idx; i++) {
4661                                if (array[i] == rec->ip) {
4662                                        exists = true;
4663                                        break;
4664                                }
4665                        }
4666
4667                        if (!not) {
4668                                fail = 0;
4669                                if (!exists) {
4670                                        array[(*idx)++] = rec->ip;
4671                                        if (*idx >= size)
4672                                                goto out;
4673                                }
4674                        } else {
4675                                if (exists) {
4676                                        array[i] = array[--(*idx)];
4677                                        array[*idx] = 0;
4678                                        fail = 0;
4679                                }
4680                        }
4681                }
4682        } while_for_each_ftrace_rec();
4683out:
4684        mutex_unlock(&ftrace_lock);
4685
4686        if (fail)
4687                return -EINVAL;
4688
4689        return 0;
4690}
4691
4692static ssize_t
4693ftrace_graph_write(struct file *file, const char __user *ubuf,
4694                   size_t cnt, loff_t *ppos)
4695{
4696        struct trace_parser parser;
4697        ssize_t read, ret = 0;
4698        struct ftrace_graph_data *fgd = file->private_data;
4699
4700        if (!cnt)
4701                return 0;
4702
4703        if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4704                return -ENOMEM;
4705
4706        read = trace_get_user(&parser, ubuf, cnt, ppos);
4707
4708        if (read >= 0 && trace_parser_loaded((&parser))) {
4709                parser.buffer[parser.idx] = 0;
4710
4711                mutex_lock(&graph_lock);
4712
4713                /* we allow only one expression at a time */
4714                ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4715                                      parser.buffer);
4716
4717                mutex_unlock(&graph_lock);
4718        }
4719
4720        if (!ret)
4721                ret = read;
4722
4723        trace_parser_put(&parser);
4724
4725        return ret;
4726}
4727
4728static const struct file_operations ftrace_graph_fops = {
4729        .open           = ftrace_graph_open,
4730        .read           = seq_read,
4731        .write          = ftrace_graph_write,
4732        .llseek         = tracing_lseek,
4733        .release        = ftrace_graph_release,
4734};
4735
4736static const struct file_operations ftrace_graph_notrace_fops = {
4737        .open           = ftrace_graph_notrace_open,
4738        .read           = seq_read,
4739        .write          = ftrace_graph_write,
4740        .llseek         = tracing_lseek,
4741        .release        = ftrace_graph_release,
4742};
4743#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4744
4745void ftrace_create_filter_files(struct ftrace_ops *ops,
4746                                struct dentry *parent)
4747{
4748
4749        trace_create_file("set_ftrace_filter", 0644, parent,
4750                          ops, &ftrace_filter_fops);
4751
4752        trace_create_file("set_ftrace_notrace", 0644, parent,
4753                          ops, &ftrace_notrace_fops);
4754}
4755
4756/*
4757 * The name "destroy_filter_files" is really a misnomer. Although
4758 * it may actually delete the files in the future, for now it is
4759 * really intended to make sure the ops passed in are disabled
4760 * and that when this function returns, the caller is free to
4761 * free the ops.
4762 *
4763 * The "destroy" name is only to match the "create" name that this
4764 * should be paired with.
4765 */
4766void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4767{
4768        mutex_lock(&ftrace_lock);
4769        if (ops->flags & FTRACE_OPS_FL_ENABLED)
4770                ftrace_shutdown(ops, 0);
4771        ops->flags |= FTRACE_OPS_FL_DELETED;
4772        mutex_unlock(&ftrace_lock);
4773}
4774
4775static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
4776{
4777
4778        trace_create_file("available_filter_functions", 0444,
4779                        d_tracer, NULL, &ftrace_avail_fops);
4780
4781        trace_create_file("enabled_functions", 0444,
4782                        d_tracer, NULL, &ftrace_enabled_fops);
4783
4784        ftrace_create_filter_files(&global_ops, d_tracer);
4785
4786#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4787        trace_create_file("set_graph_function", 0444, d_tracer,
4788                                    NULL,
4789                                    &ftrace_graph_fops);
4790        trace_create_file("set_graph_notrace", 0444, d_tracer,
4791                                    NULL,
4792                                    &ftrace_graph_notrace_fops);
4793#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4794
4795        return 0;
4796}
4797
4798static int ftrace_cmp_ips(const void *a, const void *b)
4799{
4800        const unsigned long *ipa = a;
4801        const unsigned long *ipb = b;
4802
4803        if (*ipa > *ipb)
4804                return 1;
4805        if (*ipa < *ipb)
4806                return -1;
4807        return 0;
4808}
4809
4810static int ftrace_process_locs(struct module *mod,
4811                               unsigned long *start,
4812                               unsigned long *end)
4813{
4814        struct ftrace_page *start_pg;
4815        struct ftrace_page *pg;
4816        struct dyn_ftrace *rec;
4817        unsigned long count;
4818        unsigned long *p;
4819        unsigned long addr;
4820        unsigned long flags = 0; /* Shut up gcc */
4821        int ret = -ENOMEM;
4822
4823        count = end - start;
4824
4825        if (!count)
4826                return 0;
4827
4828        sort(start, count, sizeof(*start),
4829             ftrace_cmp_ips, NULL);
4830
4831        start_pg = ftrace_allocate_pages(count);
4832        if (!start_pg)
4833                return -ENOMEM;
4834
4835        mutex_lock(&ftrace_lock);
4836
4837        /*
4838         * The core kernel and each module need their own pages, as
4839         * modules will free them when they are removed.
4840         * Force a new page to be allocated for modules.
4841         */
4842        if (!mod) {
4843                WARN_ON(ftrace_pages || ftrace_pages_start);
4844                /* First initialization */
4845                ftrace_pages = ftrace_pages_start = start_pg;
4846        } else {
4847                if (!ftrace_pages)
4848                        goto out;
4849
4850                if (WARN_ON(ftrace_pages->next)) {
4851                        /* Hmm, we have free pages? */
4852                        while (ftrace_pages->next)
4853                                ftrace_pages = ftrace_pages->next;
4854                }
4855
4856                ftrace_pages->next = start_pg;
4857        }
4858
4859        p = start;
4860        pg = start_pg;
4861        while (p < end) {
4862                addr = ftrace_call_adjust(*p++);
4863                /*
4864                 * Some architecture linkers will pad between
4865                 * the different mcount_loc sections of different
4866                 * object files to satisfy alignments.
4867                 * Skip any NULL pointers.
4868                 */
4869                if (!addr)
4870                        continue;
4871
4872                if (pg->index == pg->size) {
4873                        /* We should have allocated enough */
4874                        if (WARN_ON(!pg->next))
4875                                break;
4876                        pg = pg->next;
4877                }
4878
4879                rec = &pg->records[pg->index++];
4880                rec->ip = addr;
4881        }
4882
4883        /* We should have used all pages */
4884        WARN_ON(pg->next);
4885
4886        /* Assign the last page to ftrace_pages */
4887        ftrace_pages = pg;
4888
4889        /*
4890         * We only need to disable interrupts on start up
4891         * because we are modifying code that an interrupt
4892         * may execute, and the modification is not atomic.
4893         * But for modules, nothing runs the code we modify
4894         * until we are finished with it, and there's no
4895         * reason to cause large interrupt latencies while we do it.
4896         */
4897        if (!mod)
4898                local_irq_save(flags);
4899        ftrace_update_code(mod, start_pg);
4900        if (!mod)
4901                local_irq_restore(flags);
4902        ret = 0;
4903 out:
4904        mutex_unlock(&ftrace_lock);
4905
4906        return ret;
4907}
4908
4909#ifdef CONFIG_MODULES
4910
4911#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4912
4913void ftrace_release_mod(struct module *mod)
4914{
4915        struct dyn_ftrace *rec;
4916        struct ftrace_page **last_pg;
4917        struct ftrace_page *pg;
4918        int order;
4919
4920        mutex_lock(&ftrace_lock);
4921
4922        if (ftrace_disabled)
4923                goto out_unlock;
4924
4925        /*
4926         * Each module has its own ftrace_pages, remove
4927         * them from the list.
4928         */
4929        last_pg = &ftrace_pages_start;
4930        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4931                rec = &pg->records[0];
4932                if (within_module_core(rec->ip, mod)) {
4933                        /*
4934                         * As core pages are first, the first
4935                         * page should never be a module page.
4936                         */
4937                        if (WARN_ON(pg == ftrace_pages_start))
4938                                goto out_unlock;
4939
4940                        /* Check if we are deleting the last page */
4941                        if (pg == ftrace_pages)
4942                                ftrace_pages = next_to_ftrace_page(last_pg);
4943
4944                        *last_pg = pg->next;
4945                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4946                        free_pages((unsigned long)pg->records, order);
4947                        kfree(pg);
4948                } else
4949                        last_pg = &pg->next;
4950        }
4951 out_unlock:
4952        mutex_unlock(&ftrace_lock);
4953}
4954
4955static void ftrace_init_module(struct module *mod,
4956                               unsigned long *start, unsigned long *end)
4957{
4958        if (ftrace_disabled || start == end)
4959                return;
4960        ftrace_process_locs(mod, start, end);
4961}
4962
4963void ftrace_module_init(struct module *mod)
4964{
4965        ftrace_init_module(mod, mod->ftrace_callsites,
4966                           mod->ftrace_callsites +
4967                           mod->num_ftrace_callsites);
4968}
4969
4970static int ftrace_module_notify_exit(struct notifier_block *self,
4971                                     unsigned long val, void *data)
4972{
4973        struct module *mod = data;
4974
4975        if (val == MODULE_STATE_GOING)
4976                ftrace_release_mod(mod);
4977
4978        return 0;
4979}
4980#else
4981static int ftrace_module_notify_exit(struct notifier_block *self,
4982                                     unsigned long val, void *data)
4983{
4984        return 0;
4985}
4986#endif /* CONFIG_MODULES */
4987
4988struct notifier_block ftrace_module_exit_nb = {
4989        .notifier_call = ftrace_module_notify_exit,
4990        .priority = INT_MIN,    /* Run after anything that can remove kprobes */
4991};
4992
4993void __init ftrace_init(void)
4994{
4995        extern unsigned long __start_mcount_loc[];
4996        extern unsigned long __stop_mcount_loc[];
4997        unsigned long count, flags;
4998        int ret;
4999
5000        local_irq_save(flags);
5001        ret = ftrace_dyn_arch_init();
5002        local_irq_restore(flags);
5003        if (ret)
5004                goto failed;
5005
5006        count = __stop_mcount_loc - __start_mcount_loc;
5007        if (!count) {
5008                pr_info("ftrace: No functions to be traced?\n");
5009                goto failed;
5010        }
5011
5012        pr_info("ftrace: allocating %ld entries in %ld pages\n",
5013                count, count / ENTRIES_PER_PAGE + 1);
5014
5015        last_ftrace_enabled = ftrace_enabled = 1;
5016
5017        ret = ftrace_process_locs(NULL,
5018                                  __start_mcount_loc,
5019                                  __stop_mcount_loc);
5020
5021        ret = register_module_notifier(&ftrace_module_exit_nb);
5022        if (ret)
5023                pr_warning("Failed to register ftrace module exit notifier\n");
5024
5025        set_ftrace_early_filters();
5026
5027        return;
5028 failed:
5029        ftrace_disabled = 1;
5030}
5031
5032/* Do nothing if arch does not support this */
5033void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
5034{
5035}
5036
5037static void ftrace_update_trampoline(struct ftrace_ops *ops)
5038{
5039
5040/*
5041 * Currently there's no safe way to free a trampoline when the kernel
5042 * is configured with PREEMPT. That is because a task could be preempted
5043 * while executing on the trampoline, and it may stay preempted for a
5044 * long time depending on the system load; currently there's no way to
5045 * know when it will be off the trampoline. If the trampoline is freed
5046 * too early, then when the task runs again it will be executing on
5047 * freed memory and crash.
5048 */
5049#ifdef CONFIG_PREEMPT
5050        /* Currently, only non-dynamic ops can have a trampoline */
5051        if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
5052                return;
5053#endif
5054
5055        arch_ftrace_update_trampoline(ops);
5056}
5057
5058#else
5059
5060static struct ftrace_ops global_ops = {
5061        .func                   = ftrace_stub,
5062        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
5063                                  FTRACE_OPS_FL_INITIALIZED |
5064                                  FTRACE_OPS_FL_PID,
5065};
5066
5067static int __init ftrace_nodyn_init(void)
5068{
5069        ftrace_enabled = 1;
5070        return 0;
5071}
5072core_initcall(ftrace_nodyn_init);
5073
5074static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
5075static inline void ftrace_startup_enable(int command) { }
5076static inline void ftrace_startup_all(int command) { }
5077/* Keep as macros so we do not need to define the commands */
5078# define ftrace_startup(ops, command)                                   \
5079        ({                                                              \
5080                int ___ret = __register_ftrace_function(ops);           \
5081                if (!___ret)                                            \
5082                        (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
5083                ___ret;                                                 \
5084        })
5085# define ftrace_shutdown(ops, command)                                  \
5086        ({                                                              \
5087                int ___ret = __unregister_ftrace_function(ops);         \
5088                if (!___ret)                                            \
5089                        (ops)->flags &= ~FTRACE_OPS_FL_ENABLED;         \
5090                ___ret;                                                 \
5091        })
5092
5093# define ftrace_startup_sysctl()        do { } while (0)
5094# define ftrace_shutdown_sysctl()       do { } while (0)
5095
5096static inline int
5097ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
5098{
5099        return 1;
5100}
5101
5102static void ftrace_update_trampoline(struct ftrace_ops *ops)
5103{
5104}
5105
5106#endif /* CONFIG_DYNAMIC_FTRACE */
5107
5108__init void ftrace_init_global_array_ops(struct trace_array *tr)
5109{
5110        tr->ops = &global_ops;
5111        tr->ops->private = tr;
5112}
5113
5114void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5115{
5116        /* If we filter on pids, update to use the pid function */
5117        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
5118                if (WARN_ON(tr->ops->func != ftrace_stub))
5119                        printk("ftrace ops had %pS for function\n",
5120                               tr->ops->func);
5121        }
5122        tr->ops->func = func;
5123        tr->ops->private = tr;
5124}
5125
5126void ftrace_reset_array_ops(struct trace_array *tr)
5127{
5128        tr->ops->func = ftrace_stub;
5129}
5130
5131static void
5132ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
5133                        struct ftrace_ops *op, struct pt_regs *regs)
5134{
5135        if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
5136                return;
5137
5138        /*
5139         * Some of the ops may be dynamically allocated;
5140         * they must not be freed until after a synchronize_sched().
5141         */
5142        preempt_disable_notrace();
5143        trace_recursion_set(TRACE_CONTROL_BIT);
5144
5145        /*
5146         * Control funcs (perf) use RCU. Only trace if
5147         * RCU is currently active.
5148         */
5149        if (!rcu_is_watching())
5150                goto out;
5151
5152        do_for_each_ftrace_op(op, ftrace_control_list) {
5153                if (!(op->flags & FTRACE_OPS_FL_STUB) &&
5154                    !ftrace_function_local_disabled(op) &&
5155                    ftrace_ops_test(op, ip, regs))
5156                        op->func(ip, parent_ip, op, regs);
5157        } while_for_each_ftrace_op(op);
5158 out:
5159        trace_recursion_clear(TRACE_CONTROL_BIT);
5160        preempt_enable_notrace();
5161}
5162
5163static struct ftrace_ops control_ops = {
5164        .func   = ftrace_ops_control_func,
5165        .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
5166        INIT_OPS_HASH(control_ops)
5167};
5168
5169static inline void
5170__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5171                       struct ftrace_ops *ignored, struct pt_regs *regs)
5172{
5173        struct ftrace_ops *op;
5174        int bit;
5175
5176        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5177        if (bit < 0)
5178                return;
5179
5180        /*
5181         * Some of the ops may be dynamically allocated;
5182         * they must not be freed until after a synchronize_sched().
5183         */
5184        preempt_disable_notrace();
5185        do_for_each_ftrace_op(op, ftrace_ops_list) {
5186                if (ftrace_ops_test(op, ip, regs)) {
5187                        if (FTRACE_WARN_ON(!op->func)) {
5188                                pr_warn("op=%p %pS\n", op, op);
5189                                goto out;
5190                        }
5191                        op->func(ip, parent_ip, op, regs);
5192                }
5193        } while_for_each_ftrace_op(op);
5194out:
5195        preempt_enable_notrace();
5196        trace_clear_recursion(bit);
5197}
5198
5199/*
5200 * Some archs only support passing ip and parent_ip. Even though
5201 * the list function ignores the op parameter, we do not want any
5202 * C side effects, where a function is called without the caller
5203 * sending a third parameter.
5204 * Archs are expected to support regs and ftrace_ops at the same time:
5205 * if they support ftrace_ops, it is assumed they support regs.
5206 * If callbacks want to use regs, they must either check for regs
5207 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
5208 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full pt_regs to be saved.
5209 * An architecture can pass partial regs with ftrace_ops and still
5210 * set ARCH_SUPPORTS_FTRACE_OPS.
5211 */
5212#if ARCH_SUPPORTS_FTRACE_OPS
5213static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5214                                 struct ftrace_ops *op, struct pt_regs *regs)
5215{
5216        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
5217}
5218#else
5219static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
5220{
5221        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
5222}
5223#endif
5224
5225/*
5226 * If there's only one function registered but it does not support
5227 * recursion, this function will be called by the mcount trampoline.
5228 * This function will handle recursion protection.
5229 */
5230static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
5231                                   struct ftrace_ops *op, struct pt_regs *regs)
5232{
5233        int bit;
5234
5235        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5236        if (bit < 0)
5237                return;
5238
5239        op->func(ip, parent_ip, op, regs);
5240
5241        trace_clear_recursion(bit);
5242}
5243
5244/**
5245 * ftrace_ops_get_func - get the function a trampoline should call
5246 * @ops: the ops to get the function for
5247 *
5248 * Normally the mcount trampoline will call the ops->func, but there
5249 * are times that it should not. For example, if the ops does not
5250 * have its own recursion protection, then it should call the
5251 * ftrace_ops_recurs_func() instead.
5252 *
5253 * Returns the function that the trampoline should call for @ops.
5254 */
5255ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
5256{
5257        /*
5258         * If the func handles its own recursion, call it directly.
5259         * Otherwise call the recursion protected function that
5260         * will call the ftrace ops function.
5261         */
5262        if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
5263                return ftrace_ops_recurs_func;
5264
5265        return ops->func;
5266}
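
/*
 * Illustrative sketch (not in-tree code): how the two cases above play
 * out. The "my_*" names are hypothetical.
 *
 *	static void notrace my_cb(unsigned long ip, unsigned long pip,
 *				  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// callback body; must do its own recursion checks for case 1
 *	}
 *
 *	static struct ftrace_ops safe_ops = {
 *		.func  = my_cb,
 *		.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *	// ftrace_ops_get_func(&safe_ops) returns my_cb directly.
 *
 *	static struct ftrace_ops unsafe_ops = {
 *		.func = my_cb,	// RECURSION_SAFE not set
 *	};
 *	// ftrace_ops_get_func(&unsafe_ops) returns ftrace_ops_recurs_func,
 *	// which takes the recursion bit before invoking ->func.
 */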
5267
5268static void clear_ftrace_swapper(void)
5269{
5270        struct task_struct *p;
5271        int cpu;
5272
5273        get_online_cpus();
5274        for_each_online_cpu(cpu) {
5275                p = idle_task(cpu);
5276                clear_tsk_trace_trace(p);
5277        }
5278        put_online_cpus();
5279}
5280
5281static void set_ftrace_swapper(void)
5282{
5283        struct task_struct *p;
5284        int cpu;
5285
5286        get_online_cpus();
5287        for_each_online_cpu(cpu) {
5288                p = idle_task(cpu);
5289                set_tsk_trace_trace(p);
5290        }
5291        put_online_cpus();
5292}
5293
5294static void clear_ftrace_pid(struct pid *pid)
5295{
5296        struct task_struct *p;
5297
5298        rcu_read_lock();
5299        do_each_pid_task(pid, PIDTYPE_PID, p) {
5300                clear_tsk_trace_trace(p);
5301        } while_each_pid_task(pid, PIDTYPE_PID, p);
5302        rcu_read_unlock();
5303
5304        put_pid(pid);
5305}
5306
5307static void set_ftrace_pid(struct pid *pid)
5308{
5309        struct task_struct *p;
5310
5311        rcu_read_lock();
5312        do_each_pid_task(pid, PIDTYPE_PID, p) {
5313                set_tsk_trace_trace(p);
5314        } while_each_pid_task(pid, PIDTYPE_PID, p);
5315        rcu_read_unlock();
5316}
5317
5318static void clear_ftrace_pid_task(struct pid *pid)
5319{
5320        if (pid == ftrace_swapper_pid)
5321                clear_ftrace_swapper();
5322        else
5323                clear_ftrace_pid(pid);
5324}
5325
5326static void set_ftrace_pid_task(struct pid *pid)
5327{
5328        if (pid == ftrace_swapper_pid)
5329                set_ftrace_swapper();
5330        else
5331                set_ftrace_pid(pid);
5332}
5333
5334static int ftrace_pid_add(int p)
5335{
5336        struct pid *pid;
5337        struct ftrace_pid *fpid;
5338        int ret = -EINVAL;
5339
5340        mutex_lock(&ftrace_lock);
5341
5342        if (!p)
5343                pid = ftrace_swapper_pid;
5344        else
5345                pid = find_get_pid(p);
5346
5347        if (!pid)
5348                goto out;
5349
5350        ret = 0;
5351
5352        list_for_each_entry(fpid, &ftrace_pids, list)
5353                if (fpid->pid == pid)
5354                        goto out_put;
5355
5356        ret = -ENOMEM;
5357
5358        fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5359        if (!fpid)
5360                goto out_put;
5361
5362        list_add(&fpid->list, &ftrace_pids);
5363        fpid->pid = pid;
5364
5365        set_ftrace_pid_task(pid);
5366
5367        ftrace_update_pid_func();
5368
5369        ftrace_startup_all(0);
5370
5371        mutex_unlock(&ftrace_lock);
5372        return 0;
5373
5374out_put:
5375        if (pid != ftrace_swapper_pid)
5376                put_pid(pid);
5377
5378out:
5379        mutex_unlock(&ftrace_lock);
5380        return ret;
5381}
5382
5383static void ftrace_pid_reset(void)
5384{
5385        struct ftrace_pid *fpid, *safe;
5386
5387        mutex_lock(&ftrace_lock);
5388        list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5389                struct pid *pid = fpid->pid;
5390
5391                clear_ftrace_pid_task(pid);
5392
5393                list_del(&fpid->list);
5394                kfree(fpid);
5395        }
5396
5397        ftrace_update_pid_func();
5398        ftrace_startup_all(0);
5399
5400        mutex_unlock(&ftrace_lock);
5401}
5402
5403static void *fpid_start(struct seq_file *m, loff_t *pos)
5404{
5405        mutex_lock(&ftrace_lock);
5406
5407        if (!ftrace_pids_enabled() && (!*pos))
5408                return (void *) 1;
5409
5410        return seq_list_start(&ftrace_pids, *pos);
5411}
5412
5413static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5414{
5415        if (v == (void *)1)
5416                return NULL;
5417
5418        return seq_list_next(v, &ftrace_pids, pos);
5419}
5420
5421static void fpid_stop(struct seq_file *m, void *p)
5422{
5423        mutex_unlock(&ftrace_lock);
5424}
5425
5426static int fpid_show(struct seq_file *m, void *v)
5427{
5428        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5429
5430        if (v == (void *)1) {
5431                seq_puts(m, "no pid\n");
5432                return 0;
5433        }
5434
5435        if (fpid->pid == ftrace_swapper_pid)
5436                seq_puts(m, "swapper tasks\n");
5437        else
5438                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5439
5440        return 0;
5441}
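
/*
 * Example output (illustrative): reading set_ftrace_pid produces either
 * "no pid" when the list is empty, or one line per entry, e.g.:
 *
 *	swapper tasks
 *	1234
 */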
5442
5443static const struct seq_operations ftrace_pid_sops = {
5444        .start = fpid_start,
5445        .next = fpid_next,
5446        .stop = fpid_stop,
5447        .show = fpid_show,
5448};
5449
5450static int
5451ftrace_pid_open(struct inode *inode, struct file *file)
5452{
5453        int ret = 0;
5454
5455        if ((file->f_mode & FMODE_WRITE) &&
5456            (file->f_flags & O_TRUNC))
5457                ftrace_pid_reset();
5458
5459        if (file->f_mode & FMODE_READ)
5460                ret = seq_open(file, &ftrace_pid_sops);
5461
5462        return ret;
5463}
5464
5465static ssize_t
5466ftrace_pid_write(struct file *filp, const char __user *ubuf,
5467                   size_t cnt, loff_t *ppos)
5468{
5469        char buf[64], *tmp;
5470        long val;
5471        int ret;
5472
5473        if (cnt >= sizeof(buf))
5474                return -EINVAL;
5475
5476        if (copy_from_user(&buf, ubuf, cnt))
5477                return -EFAULT;
5478
5479        buf[cnt] = 0;
5480
5481        /*
5482         * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5483         * to clear the filter quietly.
5484         */
5485        tmp = strstrip(buf);
5486        if (strlen(tmp) == 0)
5487                return 1;
5488
5489        ret = kstrtol(tmp, 10, &val);
5490        if (ret < 0)
5491                return ret;
5492
5493        ret = ftrace_pid_add(val);
5494
5495        return ret ? ret : cnt;
5496}
5497
5498static int
5499ftrace_pid_release(struct inode *inode, struct file *file)
5500{
5501        if (file->f_mode & FMODE_READ)
5502                seq_release(inode, file);
5503
5504        return 0;
5505}
5506
5507static const struct file_operations ftrace_pid_fops = {
5508        .open           = ftrace_pid_open,
5509        .write          = ftrace_pid_write,
5510        .read           = seq_read,
5511        .llseek         = tracing_lseek,
5512        .release        = ftrace_pid_release,
5513};
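
/*
 * Usage note (illustrative; the tracefs mount point may differ): this
 * file is created below as <tracefs>/set_ftrace_pid. Writing a pid
 * limits function tracing to that task, writing 0 selects the swapper
 * tasks (ftrace_swapper_pid), and truncating clears the list quietly:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo > /sys/kernel/debug/tracing/set_ftrace_pid	# reset via O_TRUNC
 */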
5514
5515static __init int ftrace_init_tracefs(void)
5516{
5517        struct dentry *d_tracer;
5518
5519        d_tracer = tracing_init_dentry();
5520        if (IS_ERR(d_tracer))
5521                return 0;
5522
5523        ftrace_init_dyn_tracefs(d_tracer);
5524
5525        trace_create_file("set_ftrace_pid", 0644, d_tracer,
5526                            NULL, &ftrace_pid_fops);
5527
5528        ftrace_profile_tracefs(d_tracer);
5529
5530        return 0;
5531}
5532fs_initcall(ftrace_init_tracefs);
5533
5534/**
5535 * ftrace_kill - kill ftrace
5536 *
5537 * This function should be used by panic code. It stops ftrace
5538 * but in a not so nice way, with no clean up. To simply stop
5539 * tracing from a non-atomic section, unregister the ftrace_ops instead.
5540 */
5541void ftrace_kill(void)
5542{
5543        ftrace_disabled = 1;
5544        ftrace_enabled = 0;
5545        clear_ftrace_function();
5546}
5547
5548/**
5549 * ftrace_is_dead - Test if ftrace is dead or not.
5550 */
5551int ftrace_is_dead(void)
5552{
5553        return ftrace_disabled;
5554}
5555
5556/**
5557 * register_ftrace_function - register a function for profiling
5558 * @ops: ops structure that holds the function for profiling.
5559 *
5560 * Register a function to be called by all functions in the
5561 * kernel.
5562 *
5563 * Note: @ops->func and all the functions it calls must be labeled
5564 *       with "notrace", otherwise it will go into a
5565 *       recursive loop.
5566 */
5567int register_ftrace_function(struct ftrace_ops *ops)
5568{
5569        int ret = -1;
5570
5571        ftrace_ops_init(ops);
5572
5573        mutex_lock(&ftrace_lock);
5574
5575        ret = ftrace_startup(ops, 0);
5576
5577        mutex_unlock(&ftrace_lock);
5578
5579        return ret;
5580}
5581EXPORT_SYMBOL_GPL(register_ftrace_function);
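
/*
 * Usage sketch (illustrative, not in-tree code; error handling omitted):
 * a minimal caller that counts invocations of one function. The "my_*"
 * names are hypothetical; as the note above says, the callback and
 * everything it calls must be notrace.
 *
 *	static unsigned long my_hits;
 *
 *	static void notrace my_counter(unsigned long ip,
 *				       unsigned long parent_ip,
 *				       struct ftrace_ops *op,
 *				       struct pt_regs *regs)
 *	{
 *		my_hits++;
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func  = my_counter,
 *		.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	// e.g. from module init: trace only "schedule", then register
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 */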
5582
5583/**
5584 * unregister_ftrace_function - unregister a function for profiling.
5585 * @ops: ops structure that holds the function to unregister
5586 *
5587 * Unregister a function that was added to be called by ftrace profiling.
5588 */
5589int unregister_ftrace_function(struct ftrace_ops *ops)
5590{
5591        int ret;
5592
5593        mutex_lock(&ftrace_lock);
5594        ret = ftrace_shutdown(ops, 0);
5595        mutex_unlock(&ftrace_lock);
5596
5597        return ret;
5598}
5599EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5600
5601int
5602ftrace_enable_sysctl(struct ctl_table *table, int write,
5603                     void __user *buffer, size_t *lenp,
5604                     loff_t *ppos)
5605{
5606        int ret = -ENODEV;
5607
5608        mutex_lock(&ftrace_lock);
5609
5610        if (unlikely(ftrace_disabled))
5611                goto out;
5612
5613        ret = proc_dointvec(table, write, buffer, lenp, ppos);
5614
5615        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5616                goto out;
5617
5618        last_ftrace_enabled = !!ftrace_enabled;
5619
5620        if (ftrace_enabled) {
5621
5622                /* we are starting ftrace again */
5623                if (ftrace_ops_list != &ftrace_list_end)
5624                        update_ftrace_function();
5625
5626                ftrace_startup_sysctl();
5627
5628        } else {
5629                /* stopping ftrace calls (just send to ftrace_stub) */
5630                ftrace_trace_function = ftrace_stub;
5631
5632                ftrace_shutdown_sysctl();
5633        }
5634
5635 out:
5636        mutex_unlock(&ftrace_lock);
5637        return ret;
5638}
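
/*
 * Usage note (illustrative): this handler backs the kernel.ftrace_enabled
 * sysctl, so with the usual proc wiring tracing can be toggled with:
 *
 *	sysctl kernel.ftrace_enabled=0	# route calls to ftrace_stub
 *	sysctl kernel.ftrace_enabled=1	# re-enable registered ops
 */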
5639
5640#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5641
5642static struct ftrace_ops graph_ops = {
5643        .func                   = ftrace_stub,
5644        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
5645                                   FTRACE_OPS_FL_INITIALIZED |
5646                                   FTRACE_OPS_FL_PID |
5647                                   FTRACE_OPS_FL_STUB,
5648#ifdef FTRACE_GRAPH_TRAMP_ADDR
5649        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
5650        /* trampoline_size is only needed for dynamically allocated tramps */
5651#endif
5652        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5653};
5654
5655void ftrace_graph_sleep_time_control(bool enable)
5656{
5657        fgraph_sleep_time = enable;
5658}
5659
5660void ftrace_graph_graph_time_control(bool enable)
5661{
5662        fgraph_graph_time = enable;
5663}
5664
5665int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5666{
5667        return 0;
5668}
5669
5670/* The callbacks that hook a function's entry and return */
5671trace_func_graph_ret_t ftrace_graph_return =
5672                        (trace_func_graph_ret_t)ftrace_stub;
5673trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5674static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5675
5676/* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5677static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5678{
5679        int i;
5680        int ret = 0;
5681        unsigned long flags;
5682        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5683        struct task_struct *g, *t;
5684
5685        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5686                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5687                                        * sizeof(struct ftrace_ret_stack),
5688                                        GFP_KERNEL);
5689                if (!ret_stack_list[i]) {
5690                        start = 0;
5691                        end = i;
5692                        ret = -ENOMEM;
5693                        goto free;
5694                }
5695        }
5696
5697        read_lock_irqsave(&tasklist_lock, flags);
5698        do_each_thread(g, t) {
5699                if (start == end) {
5700                        ret = -EAGAIN;
5701                        goto unlock;
5702                }
5703
5704                if (t->ret_stack == NULL) {
5705                        atomic_set(&t->tracing_graph_pause, 0);
5706                        atomic_set(&t->trace_overrun, 0);
5707                        t->curr_ret_stack = -1;
5708                        /* Make sure the tasks see the -1 first: */
5709                        smp_wmb();
5710                        t->ret_stack = ret_stack_list[start++];
5711                }
5712        } while_each_thread(g, t);
5713
5714unlock:
5715        read_unlock_irqrestore(&tasklist_lock, flags);
5716free:
5717        for (i = start; i < end; i++)
5718                kfree(ret_stack_list[i]);
5719        return ret;
5720}
5721
5722static void
5723ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
5724                        struct task_struct *prev, struct task_struct *next)
5725{
5726        unsigned long long timestamp;
5727        int index;
5728
5729        /*
5730         * Does the user want to count the time a function was asleep?
5731         * If so, do not update the timestamps.
5732         */
5733        if (fgraph_sleep_time)
5734                return;
5735
5736        timestamp = trace_clock_local();
5737
5738        prev->ftrace_timestamp = timestamp;
5739
5740        /* only process tasks that we timestamped */
5741        if (!next->ftrace_timestamp)
5742                return;
5743
5744        /*
5745         * Update all the counters in next to make up for the
5746         * time next was sleeping.
5747         */
5748        timestamp -= next->ftrace_timestamp;
5749
5750        for (index = next->curr_ret_stack; index >= 0; index--)
5751                next->ret_stack[index].calltime += timestamp;
5752}
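
/*
 * Worked example (illustrative numbers): if "next" was switched out at
 * t = 100us and is switched back in at t = 150us, then timestamp ends up
 * as 50us and each pending entry's calltime is advanced by 50us, so the
 * sleep is not charged to the functions still on next's return stack.
 */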
5753
5754/* Allocate a return stack for each task */
5755static int start_graph_tracing(void)
5756{
5757        struct ftrace_ret_stack **ret_stack_list;
5758        int ret, cpu;
5759
5760        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5761                                sizeof(struct ftrace_ret_stack *),
5762                                GFP_KERNEL);
5763
5764        if (!ret_stack_list)
5765                return -ENOMEM;
5766
5767        /* The cpu_boot init_task->ret_stack will never be freed */
5768        for_each_online_cpu(cpu) {
5769                if (!idle_task(cpu)->ret_stack)
5770                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5771        }
5772
5773        do {
5774                ret = alloc_retstack_tasklist(ret_stack_list);
5775        } while (ret == -EAGAIN);
5776
5777        if (!ret) {
5778                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5779                if (ret)
5780                        pr_info("ftrace_graph: Couldn't activate tracepoint"
5781                                " probe to kernel_sched_switch\n");
5782        }
5783
5784        kfree(ret_stack_list);
5785        return ret;
5786}
5787
5788/*
5789 * Hibernation protection.
5790 * The state of the current task is too unstable during
5791 * suspend/restore to disk. We want to protect against that.
5792 */
5793static int
5794ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5795                                                        void *unused)
5796{
5797        switch (state) {
5798        case PM_HIBERNATION_PREPARE:
5799                pause_graph_tracing();
5800                break;
5801
5802        case PM_POST_HIBERNATION:
5803                unpause_graph_tracing();
5804                break;
5805        }
5806        return NOTIFY_DONE;
5807}
5808
5809static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5810{
5811        if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5812                return 0;
5813        return __ftrace_graph_entry(trace);
5814}
5815
5816/*
5817 * The function graph tracer should only trace the functions defined
5818 * by set_ftrace_filter and set_ftrace_notrace. If another function
5819 * tracer ops is registered, the graph tracer must test each function
5820 * against the global ops, rather than tracing any function that any
5821 * ftrace_ops has registered.
5822 */
5823static void update_function_graph_func(void)
5824{
5825        struct ftrace_ops *op;
5826        bool do_test = false;
5827
5828        /*
5829         * The graph and global ops share the same set of functions
5830         * to test. If any other ops is on the list, then
5831         * the graph tracing needs to test if it's the function
5832         * it should call.
5833         */
5834        do_for_each_ftrace_op(op, ftrace_ops_list) {
5835                if (op != &global_ops && op != &graph_ops &&
5836                    op != &ftrace_list_end) {
5837                        do_test = true;
5838                        /* in double loop, break out with goto */
5839                        goto out;
5840                }
5841        } while_for_each_ftrace_op(op);
5842 out:
5843        if (do_test)
5844                ftrace_graph_entry = ftrace_graph_entry_test;
5845        else
5846                ftrace_graph_entry = __ftrace_graph_entry;
5847}
5848
5849static struct notifier_block ftrace_suspend_notifier = {
5850        .notifier_call = ftrace_suspend_notifier_call,
5851};
5852
5853int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5854                        trace_func_graph_ent_t entryfunc)
5855{
5856        int ret = 0;
5857
5858        mutex_lock(&ftrace_lock);
5859
5860        /* we currently allow only one tracer registered at a time */
5861        if (ftrace_graph_active) {
5862                ret = -EBUSY;
5863                goto out;
5864        }
5865
5866        register_pm_notifier(&ftrace_suspend_notifier);
5867
5868        ftrace_graph_active++;
5869        ret = start_graph_tracing();
5870        if (ret) {
5871                ftrace_graph_active--;
5872                goto out;
5873        }
5874
5875        ftrace_graph_return = retfunc;
5876
5877        /*
5878         * First point the indirect function at entryfunc, and the
5879         * function that actually gets called at the entry test. Then
5880         * call update_function_graph_func() to determine whether
5881         * entryfunc should be called directly or not.
5882         */
5883        __ftrace_graph_entry = entryfunc;
5884        ftrace_graph_entry = ftrace_graph_entry_test;
5885        update_function_graph_func();
5886
5887        ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5888out:
5889        mutex_unlock(&ftrace_lock);
5890        return ret;
5891}
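
/*
 * Usage sketch (illustrative, not in-tree code): a graph-tracer client
 * registers an entry and a return hook roughly like this; the "my_*"
 * names are hypothetical.
 *
 *	static int notrace my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: also hook this function's return
 *	}
 *
 *	static void notrace my_return(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the duration
 *	}
 *
 *	ret = register_ftrace_graph(my_return, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */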
5892
5893void unregister_ftrace_graph(void)
5894{
5895        mutex_lock(&ftrace_lock);
5896
5897        if (unlikely(!ftrace_graph_active))
5898                goto out;
5899
5900        ftrace_graph_active--;
5901        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5902        ftrace_graph_entry = ftrace_graph_entry_stub;
5903        __ftrace_graph_entry = ftrace_graph_entry_stub;
5904        ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5905        unregister_pm_notifier(&ftrace_suspend_notifier);
5906        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5907
5908#ifdef CONFIG_DYNAMIC_FTRACE
5909        /*
5910         * Function graph does not allocate the trampoline, but
5911         * other users of global_ops do. We need to restore the
5912         * ALLOC_TRAMP flag if one was used.
5913         */
5914        global_ops.trampoline = save_global_trampoline;
5915        if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
5916                global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
5917#endif
5918
5919 out:
5920        mutex_unlock(&ftrace_lock);
5921}
5922
5923static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5924
5925static void
5926graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5927{
5928        atomic_set(&t->tracing_graph_pause, 0);
5929        atomic_set(&t->trace_overrun, 0);
5930        t->ftrace_timestamp = 0;
5931        /* make curr_ret_stack visible before we add the ret_stack */
5932        smp_wmb();
5933        t->ret_stack = ret_stack;
5934}
5935
5936/*
5937 * Allocate a return stack for the idle task. May be the first
5938 * time through, or it may be done by CPU hotplug online.
5939 */
5940void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5941{
5942        t->curr_ret_stack = -1;
5943        /*
5944         * The idle task has no parent, it either has its own
5945         * stack or no stack at all.
5946         */
5947        if (t->ret_stack)
5948                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5949
5950        if (ftrace_graph_active) {
5951                struct ftrace_ret_stack *ret_stack;
5952
5953                ret_stack = per_cpu(idle_ret_stack, cpu);
5954                if (!ret_stack) {
5955                        ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5956                                            * sizeof(struct ftrace_ret_stack),
5957                                            GFP_KERNEL);
5958                        if (!ret_stack)
5959                                return;
5960                        per_cpu(idle_ret_stack, cpu) = ret_stack;
5961                }
5962                graph_init_task(t, ret_stack);
5963        }
5964}
5965
5966/* Allocate a return stack for newly created task */
5967void ftrace_graph_init_task(struct task_struct *t)
5968{
5969        /* Make sure we do not use the parent ret_stack */
5970        t->ret_stack = NULL;
5971        t->curr_ret_stack = -1;
5972
5973        if (ftrace_graph_active) {
5974                struct ftrace_ret_stack *ret_stack;
5975
5976                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5977                                * sizeof(struct ftrace_ret_stack),
5978                                GFP_KERNEL);
5979                if (!ret_stack)
5980                        return;
5981                graph_init_task(t, ret_stack);
5982        }
5983}
5984
5985void ftrace_graph_exit_task(struct task_struct *t)
5986{
5987        struct ftrace_ret_stack *ret_stack = t->ret_stack;
5988
5989        t->ret_stack = NULL;
5990        /* NULL must become visible to IRQs before we free it: */
5991        barrier();
5992
5993        kfree(ret_stack);
5994}
5995#endif