source: src/linux/universal/linux-4.9/kernel/kthread.c @ 31885

/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        void *data;
        struct completion parked;
        struct completion exited;
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
        KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)     \
        container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
        return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
        struct completion *vfork = ACCESS_ONCE(k->vfork_done);
        if (likely(vfork) && try_get_task_stack(k))
                return __to_kthread(vfork);
        return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
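
/*
 * Editorial usage sketch, not part of the original file: a minimal
 * thread function that honours kthread_stop().  The function name is
 * hypothetical; the pattern follows the kernel-doc above.
 */
#if 0	/* illustrative only */
static int example_stop_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* Do one unit of work, then sleep for roughly a second. */
		schedule_timeout_interruptible(HZ);
	}
	/* This value is handed back to the kthread_stop() caller. */
	return 0;
}
#endif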

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
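
/*
 * Editorial sketch, not part of the original file: a park-aware loop.
 * When the controller calls kthread_park(), the loop reaches
 * kthread_parkme() and blocks there until kthread_unpark().  All names
 * are hypothetical.
 */
#if 0	/* illustrative only */
static int example_park_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Quiesce any per-CPU state here, then park. */
			kthread_parkme();
			continue;
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif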

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
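
/*
 * Editorial sketch, not part of the original file: the canonical loop
 * for a freezable kthread.  set_freezable() clears PF_NOFREEZE, which
 * kthreads inherit from kthreadd (see kthreadd() below); without it
 * the freezer skips the thread entirely.  Names are hypothetical.
 */
#if 0	/* illustrative only */
static int example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("woke up from the refrigerator\n");
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif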

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;

        probe_kernel_read(&data, &kthread->data, sizeof(data));
        return data;
}

static void __kthread_parkme(struct kthread *self)
{
        __set_current_state(TASK_PARKED);
        while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
                if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
                        complete(&self->parked);
                schedule();
                __set_current_state(TASK_PARKED);
        }
        clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread self;
        int ret;

        self.flags = 0;
        self.data = data;
        init_completion(&self.exited);
        init_completion(&self.parked);
        current->vfork_done = &self.exited;

        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
                do_exit(-EINTR);
        }
        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(done);
        schedule();

        ret = -EINTR;

        if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(&self);
                ret = threadfn(data);
        }
        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

/* Called from do_fork() to get node information for the task about to be created. */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* If user was SIGKILLed, I release the structure. */
                struct completion *done = xchg(&create->done, NULL);

                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
                                                    void *data, int node,
                                                    const char namefmt[],
                                                    va_list args)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * a new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was SIGKILLed before kthreadd (or the new kernel
                 * thread) calls complete(), leave the cleanup of this
                 * structure to that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or the new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };

                vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(task, cpu_all_mask);
        }
        kfree(create);
        return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, pass its node
 * in @node to get NUMA affinity for the kthread stack; otherwise pass
 * NUMA_NO_NODE.  When woken, the thread will run @threadfn() with @data
 * as its argument.  @threadfn() can either call do_exit() directly if it
 * is a standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct task_struct *task;
        va_list args;

        va_start(args, namefmt);
        task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
        va_end(args);

        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
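
/*
 * Editorial sketch, not part of the original file: create a thread on
 * no particular node, name it, and start it.  This is essentially what
 * the kthread_run() convenience macro in <linux/kthread.h> expands to.
 * example_task and example_stop_fn (sketched earlier) are hypothetical.
 */
#if 0	/* illustrative only */
static struct task_struct *example_task;

static int example_setup(void)
{
	example_task = kthread_create_on_node(example_stop_fn, NULL,
					      NUMA_NO_NODE, "example/%d", 0);
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	wake_up_process(example_task);	/* the thread starts stopped */
	return 0;
}
#endif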

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
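
/*
 * Editorial sketch, not part of the original file: bind a just-created
 * thread to CPU 0 before it ever runs.  The bind must happen while the
 * thread is still in the TASK_UNINTERRUPTIBLE sleep set up by
 * kthread(), i.e. before the first wake_up_process().  Names are
 * hypothetical.
 */
#if 0	/* illustrative only */
static int example_bound_setup(void)
{
	struct task_struct *t;

	t = kthread_create(example_stop_fn, NULL, "bound_example");
	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, 0);	/* must precede the first wakeup */
	wake_up_process(t);
	return 0;
}
#endif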

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: the cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *           to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug needs to bind the thread again when unparking it. */
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * We clear the IS_PARKED bit here as we don't wait
         * until the task has left the park code. So if we'd
         * park before that happens we'd see the IS_PARKED bit
         * which might be about to be cleared.
         */
        if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                /*
                 * Newly created kthread was parked when the CPU was offline.
                 * The binding was lost and we need to set it again.
                 */
                if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                        __kthread_bind(k, kthread->cpu, TASK_PARKED);
                wake_up_state(k, TASK_PARKED);
        }
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:          thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false and wakes it. If
 * the thread is marked percpu then it is bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_live_kthread(k);

        if (kthread) {
                __kthread_unpark(k, kthread);
                put_task_stack(k);
        }
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_live_kthread(k);
        int ret = -ENOSYS;

        if (kthread) {
                if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
                        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
                        if (k != current) {
                                wake_up_process(k);
                                wait_for_completion(&kthread->parked);
                        }
                }
                put_task_stack(k);
                ret = 0;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);
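
/*
 * Editorial sketch, not part of the original file: park a thread across
 * a critical section and let it run again afterwards.  The thread's
 * function must cooperate by checking kthread_should_park(), as in the
 * park-aware loop sketched earlier.  Names are hypothetical.
 */
#if 0	/* illustrative only */
static void example_quiesce(struct task_struct *t)
{
	if (kthread_park(t))
		return;		/* -ENOSYS: the thread already exited */
	/* The thread is now blocked in __kthread_parkme(). */
	kthread_unpark(t);	/* resumes the thread's main loop */
}
#endif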

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_live_kthread(k);
        if (kthread) {
                set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
                __kthread_unpark(k, kthread);
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
                put_task_stack(k);
        }
        ret = k->exit_code;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);
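
/*
 * Editorial sketch, not part of the original file: orderly teardown.
 * kthread_stop() both wakes the thread and waits for it to exit, so no
 * separate completion or wakeup is needed on the caller's side.
 * example_task refers to the creation sketch earlier; all names are
 * hypothetical.
 */
#if 0	/* illustrative only */
static void example_teardown(void)
{
	int ret;

	if (!example_task)
		return;
	ret = kthread_stop(example_task);	/* threadfn's return value */
	example_task = NULL;
	pr_info("example thread exited with %d\n", ret);
}
#endif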

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
                                const char *name,
                                struct lock_class_key *key)
{
        memset(worker, 0, sizeof(struct kthread_worker));
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * Works must not hold any locks or leave preemption or interrupts disabled
 * when they finish. A safe point for freezing is defined between the end of
 * one work item and the start of the next.
 *
 * Also, a work item must not be handled by more than one worker at the same
 * time; see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        /*
         * FIXME: Update the check and remove the assignment when all kthread
         * worker users are created using kthread_create_worker*() functions.
         */
        WARN_ON(worker->task && worker->task != current);
        worker->task = current;

        if (worker->flags & KTW_FREEZABLE)
                set_freezable();

repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
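
/*
 * Editorial sketch, not part of the original file: the manual way of
 * running a worker, still valid alongside kthread_create_worker():
 * initialize the worker yourself and hand kthread_worker_fn to
 * kthread_run().  Names are hypothetical.
 */
#if 0	/* illustrative only */
static struct kthread_worker example_worker;

static int example_worker_setup(void)
{
	struct task_struct *t;

	kthread_init_worker(&example_worker);
	t = kthread_run(kthread_worker_fn, &example_worker, "example_wq");
	return PTR_ERR_OR_ZERO(t);
}
#endif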

static struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
                        const char namefmt[], va_list args)
{
        struct kthread_worker *worker;
        struct task_struct *task;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        kthread_init_worker(worker);

        if (cpu >= 0) {
                char name[TASK_COMM_LEN];

                /*
                 * kthread_create_worker_on_cpu() accepts a generic namefmt,
                 * unlike kthread_create_on_cpu(), so we need to format it
                 * here.
                 */
                vsnprintf(name, sizeof(name), namefmt, args);
                task = kthread_create_on_cpu(kthread_worker_fn, worker,
                                             cpu, name);
        } else {
                task = __kthread_create_on_node(kthread_worker_fn, worker,
                                                -1, namefmt, args);
        }

        if (IS_ERR(task))
                goto fail_task;

        worker->flags = flags;
        worker->task = task;
        wake_up_process(task);
        return worker;

fail_task:
        kfree(worker);
        return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(-1, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
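
/*
 * Editorial sketch, not part of the original file: the one-call
 * shorthand that allocates the worker and spawns its thread together.
 * Names are hypothetical.
 */
#if 0	/* illustrative only */
static struct kthread_worker *example_alloc_worker(void)
{
	struct kthread_worker *w;

	w = kthread_create_worker(0, "example_worker");
	if (IS_ERR(w))
		return NULL;	/* -ENOMEM or -EINTR, see above */
	return w;
}
#endif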

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *      to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
                             const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(cpu, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work cannot be queued at the moment: it is
 * already pending on a worker list or it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
                                   struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);

        return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
                                             struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);
        WARN_ON_ONCE(!list_empty(&work->node));
        /* Do not use a work with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        kthread_insert_work_sanity_check(worker, work);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work for async execution on @worker.  @worker must have been
 * created with kthread_create_worker().  Returns %true if @work was
 * successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
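
/*
 * Editorial sketch, not part of the original file: define a work item
 * and queue it.  The callback runs in the worker thread's context, so
 * it may sleep.  Names are hypothetical.
 */
#if 0	/* illustrative only */
static void example_work_fn(struct kthread_work *work)
{
	/* container_of() recovers the enclosing object if needed. */
	pr_info("example work executed\n");
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static void example_kick(struct kthread_worker *w)
{
	if (!kthread_queue_work(w, &example_work))
		pr_debug("work was already pending\n");
}
#endif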

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *      delayed work when the timer expires.
 * @__data: pointer to the data associated with the timer
 *
 * The format of the function is defined by struct timer_list.
 * It is called from an irq-safe timer with interrupts already disabled.
 */
void kthread_delayed_work_timer_fn(unsigned long __data)
{
        struct kthread_delayed_work *dwork =
                (struct kthread_delayed_work *)__data;
        struct kthread_work *work = &dwork->work;
        struct kthread_worker *worker = work->worker;

        /*
         * This might happen when a pending work is reinitialized.
         * It means that the work is being used the wrong way.
         */
        if (WARN_ON_ONCE(!worker))
                return;

        spin_lock(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        /* Move the work from worker->delayed_work_list. */
        WARN_ON_ONCE(list_empty(&work->node));
        list_del_init(&work->node);
        kthread_insert_work(worker, work, &worker->work_list);

        spin_unlock(&worker->lock);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

void __kthread_queue_delayed_work(struct kthread_worker *worker,
                                  struct kthread_delayed_work *dwork,
                                  unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;

        WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
                     timer->data != (unsigned long)dwork);

        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
         * both optimization and correctness.  The earliest @timer can
         * expire is on the closest next tick and delayed_work users depend
         * on that there's no such delay when @delay is 0.
         */
        if (!delay) {
                kthread_insert_work(worker, work, &worker->work_list);
                return;
        }

        /* Be paranoid and try to detect possible races already now. */
        kthread_insert_work_sanity_check(worker, work);

        list_add(&work->node, &worker->delayed_work_list);
        work->worker = worker;
        timer_stats_timer_set_start_info(&dwork->timer);
        timer->expires = jiffies + delay;
        add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *      after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending, it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work was already pending, meaning that either
 * its timer was running or the work was queued; %true otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
                                struct kthread_delayed_work *dwork,
                                unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        bool ret = false;

        spin_lock_irqsave(&worker->lock, flags);

        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }

        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
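
/*
 * Editorial sketch, not part of the original file: schedule a work item
 * to run roughly two seconds from now on the worker's thread.
 * example_work_fn is the hypothetical callback sketched earlier.
 */
#if 0	/* illustrative only */
static DEFINE_KTHREAD_DELAYED_WORK(example_dwork, example_work_fn);

static void example_kick_later(struct kthread_worker *w)
{
	/* %false here means the timer or the work was already pending. */
	kthread_queue_delayed_work(w, &example_dwork, 2 * HZ);
}
#endif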

struct kthread_flush_work {
        struct kthread_work     work;
        struct completion       done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

        worker = work->worker;
        if (!worker)
                return;

        spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        if (!list_empty(&work->node))
                kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                kthread_insert_work(worker, &fwork.work,
                                    worker->work_list.next);
        else
                noop = true;

        spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. It also makes sure
 * that the work won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *      %false if @work was not pending.
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
                                  unsigned long *flags)
{
        /* Try to cancel the timer if it exists. */
        if (is_dwork) {
                struct kthread_delayed_work *dwork =
                        container_of(work, struct kthread_delayed_work, work);
                struct kthread_worker *worker = work->worker;

                /*
                 * del_timer_sync() must be called to make sure that the timer
                 * callback is not running. The lock must be temporarily
                 * released to avoid a deadlock with the callback. In the
                 * meantime, any queuing is blocked by setting the canceling
                 * counter.
                 */
                work->canceling++;
                spin_unlock_irqrestore(&worker->lock, *flags);
                del_timer_sync(&dwork->timer);
                spin_lock_irqsave(&worker->lock, *flags);
                work->canceling--;
        }

        /*
         * Try to remove the work from a worker list. It might either
         * be from worker->work_list or from worker->delayed_work_list.
         */
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                return true;
        }

        return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations in a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
                              struct kthread_delayed_work *dwork,
                              unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        int ret = false;

        spin_lock_irqsave(&worker->lock, flags);

        /* Do not bother with canceling when never queued. */
        if (!work->worker)
                goto fast_queue;

        /* Work must not be used with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker != worker);

        /* Do not fight with another command that is canceling this work. */
        if (work->canceling)
                goto out;

        ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
out:
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
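
/*
 * Editorial sketch, not part of the original file: a watchdog-style
 * pattern.  Each call pushes the expiry out again, whether or not the
 * previous timer was still pending.  example_dwork is the hypothetical
 * delayed work sketched earlier.
 */
#if 0	/* illustrative only */
static void example_pet_watchdog(struct kthread_worker *w)
{
	/* Re-arm: cancels a pending timer and queues a fresh one. */
	kthread_mod_delayed_work(w, &example_dwork, 10 * HZ);
}
#endif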

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
        struct kthread_worker *worker = work->worker;
        unsigned long flags;
        int ret = false;

        if (!worker)
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        ret = __kthread_cancel_work(work, is_dwork, &flags);

        if (worker->current_work != work)
                goto out_fast;

        /*
         * The work is in progress and we need to wait with the lock released.
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
        spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
        spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;

out_fast:
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed works.  Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
        return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *      wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
        return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
        struct task_struct *task;

        task = worker->task;
        if (WARN_ON(!task))
                return;

        kthread_flush_worker(worker);
        kthread_stop(task);
        WARN_ON(!list_empty(&worker->work_list));
        kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);
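
/*
 * Editorial sketch, not part of the original file: a full worker
 * lifecycle tying the pieces above together.  example_work is the
 * hypothetical work item sketched earlier.
 */
#if 0	/* illustrative only */
static int example_lifecycle(void)
{
	struct kthread_worker *w;

	w = kthread_create_worker(0, "example_worker");
	if (IS_ERR(w))
		return PTR_ERR(w);

	kthread_queue_work(w, &example_work);
	kthread_flush_work(&example_work);	/* wait for it to finish */

	kthread_destroy_worker(w);		/* flushes, stops, frees */
	return 0;
}
#endif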