source: src/linux/universal/linux-3.18/mm/percpu.c @ 31885

/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009           SUSE Linux Products GmbH
 * Copyright (C) 2009           Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks.  Each chunk
 * consists of a boot-time determined number of units, and the first
 * chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  That
 * is, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly
 * to cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  Each map entry holds the byte offset at which an
 * area starts, with bit 0 serving as an in-use flag: clear for a free
 * area, set for an allocated one (see pcpu_alloc_area() and
 * pcpu_free_area() below).  Allocation inside a chunk is done by
 * scanning this map sequentially and serving the first matching
 * entry.  This is mostly copied from the percpu_modalloc() allocator.
 * Chunks can be determined from the address using the index field
 * in the page struct.  The index field contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT            5       /* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC              16      /* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW      32
#define PCPU_ATOMIC_MAP_MARGIN_HIGH     64
#define PCPU_EMPTY_POP_PAGES_LOW        2
#define PCPU_EMPTY_POP_PAGES_HIGH       4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)                                        \
        (void __percpu *)((unsigned long)(addr) -                       \
                          (unsigned long)pcpu_base_addr +               \
                          (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)                                         \
        (void __force *)((unsigned long)(ptr) +                         \
                         (unsigned long)pcpu_base_addr -                \
                         (unsigned long)__per_cpu_start)
#endif
#else   /* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)        (void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)         (void __force *)(ptr)
#endif  /* CONFIG_SMP */
struct pcpu_chunk {
        struct list_head        list;           /* linked to pcpu_slot lists */
        int                     free_size;      /* free bytes in the chunk */
        int                     contig_hint;    /* max contiguous size hint */
        void                    *base_addr;     /* base address of this chunk */

        int                     map_used;       /* # of map entries used before the sentinel */
        int                     map_alloc;      /* # of map entries allocated */
        int                     *map;           /* allocation map */
        struct list_head        map_extend_list;/* on pcpu_map_extend_chunks */

        void                    *data;          /* chunk data */
        int                     first_free;     /* no free below this */
        bool                    immutable;      /* no [de]population allowed */
        int                     nr_populated;   /* # of populated pages */
        unsigned long           populated[];    /* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;          /* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;   /* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

static DEFINE_SPINLOCK(pcpu_lock);      /* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);  /* chunk create/destroy, [de]pop, map ext */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
static int pcpu_nr_empty_pop_pages;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
        if (pcpu_async_enabled)
                schedule_work(&pcpu_balance_work);
}

static bool pcpu_addr_in_first_chunk(void *addr)
{
        void *first_start = pcpu_first_chunk->base_addr;

        return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
        void *first_start = pcpu_first_chunk->base_addr;

        return addr >= first_start &&
                addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
        int highbit = fls(size);        /* size is in bytes */
        return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
        if (size == pcpu_unit_size)
                return pcpu_nr_slots - 1;
        return __pcpu_size_to_slot(size);
}
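
/*
 * Worked example (illustrative only): with PCPU_SLOT_BASE_SHIFT == 5,
 * a size of 12288 bytes has fls() == 14 and maps to slot
 * max(14 - 5 + 2, 1) == 11.  Every size with the same highest set bit,
 * i.e. the whole range [8192, 16383], shares that slot, so the chunk
 * lists are roughly sorted by free-size magnitude; a completely free
 * chunk (size == pcpu_unit_size) always goes to the last slot.
 */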

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
        if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
                return 0;

        return pcpu_size_to_slot(chunk->free_size);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
        page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
        return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
        return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
                                     unsigned int cpu, int page_idx)
{
        return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
                (page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
                                           int *rs, int *re, int end)
{
        *rs = find_next_zero_bit(chunk->populated, end, *rs);
        *re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
                                         int *rs, int *re, int end)
{
        *rs = find_next_bit(chunk->populated, end, *rs);
        *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)               \
        for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
             (rs) < (re);                                                   \
             (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)                 \
        for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
             (rs) < (re);                                                   \
             (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
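
/*
 * Usage sketch for the iterators above (illustrative only, not called
 * from anywhere): walk every populated page region of a chunk.  @rs
 * and @re receive the start and end page indexes of each region.
 *
 *      int rs, re;
 *
 *      pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *              pr_debug("pages [%d, %d) are populated\n", rs, re);
 */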

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
        if (WARN_ON_ONCE(!slab_is_available()))
                return NULL;

        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
        else
                return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(ptr);
        else
                vfree(ptr);
}

/**
 * pcpu_count_occupied_pages - count the number of pages an area occupies
 * @chunk: chunk of interest
 * @i: index of the area in question
 *
 * Count the number of pages chunk's @i'th area occupies.  When the area's
 * start and/or end address isn't aligned to page boundary, the straddled
 * page is included in the count iff the rest of the page is free.
 */
static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
{
        int off = chunk->map[i] & ~1;
        int end = chunk->map[i + 1] & ~1;

        if (!PAGE_ALIGNED(off) && i > 0) {
                int prev = chunk->map[i - 1];

                if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
                        off = round_down(off, PAGE_SIZE);
        }

        if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
                int next = chunk->map[i + 1];
                int nend = chunk->map[i + 2] & ~1;

                if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
                        end = round_up(end, PAGE_SIZE);
        }

        return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
}
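
/*
 * Worked example (illustrative, assumes PAGE_SIZE == 4096): an area
 * spanning [4608, 12288) straddles page 1.  If the preceding map entry
 * is a free area starting at or below 4096, the head of page 1 is
 * free, so @off is rounded down to 4096 and the function returns
 * PFN_DOWN(12288) - PFN_UP(4096) == 3 - 1 == 2 pages.
 */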

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
        int nslot = pcpu_chunk_slot(chunk);

        if (chunk != pcpu_reserved_chunk && oslot != nslot) {
                if (oslot < nslot)
                        list_move(&chunk->list, &pcpu_slot[nslot]);
                else
                        list_move_tail(&chunk->list, &pcpu_slot[nslot]);
        }
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 * @is_atomic: the allocation context
 *
 * Determine whether area map of @chunk needs to be extended.  If
 * @is_atomic, only the amount necessary for a new allocation is
 * considered; however, async extension is scheduled if the remaining
 * slack is low.  If !@is_atomic, it aims for more empty space.  Combined,
 * this ensures that the map is likely to have enough available space to
 * accommodate atomic allocations which can't extend maps directly.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
{
        int margin, new_alloc;

        lockdep_assert_held(&pcpu_lock);

        if (is_atomic) {
                margin = 3;

                if (chunk->map_alloc <
                    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
                        if (list_empty(&chunk->map_extend_list)) {
                                list_add_tail(&chunk->map_extend_list,
                                              &pcpu_map_extend_chunks);
                                pcpu_schedule_balance_work();
                        }
                }
        } else {
                margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
        }

        if (chunk->map_alloc >= chunk->map_used + margin)
                return 0;

        new_alloc = PCPU_DFL_MAP_ALLOC;
        while (new_alloc < chunk->map_used + margin)
                new_alloc *= 2;

        return new_alloc;
}
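
/*
 * Worked example (illustrative): a sleeping allocation against a chunk
 * with map_used == 80 and map_alloc == 128 needs
 * map_alloc >= 80 + PCPU_ATOMIC_MAP_MARGIN_HIGH == 144, so the target
 * doubles up from PCPU_DFL_MAP_ALLOC: 16 -> 32 -> 64 -> 128 -> 256
 * entries, and 256 is returned as the new allocation length.
 */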

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
        int *old = NULL, *new = NULL;
        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
        unsigned long flags;

        lockdep_assert_held(&pcpu_alloc_mutex);

        new = pcpu_mem_zalloc(new_size);
        if (!new)
                return -ENOMEM;

        /* acquire pcpu_lock and switch to new area map */
        spin_lock_irqsave(&pcpu_lock, flags);

        if (new_alloc <= chunk->map_alloc)
                goto out_unlock;

        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
        old = chunk->map;

        memcpy(new, old, old_size);

        chunk->map_alloc = new_alloc;
        chunk->map = new;
        new = NULL;

out_unlock:
        spin_unlock_irqrestore(&pcpu_lock, flags);

        /*
         * pcpu_mem_free() might end up calling vfree() which uses
         * IRQ-unsafe lock and thus can't be called under pcpu_lock.
         */
        pcpu_mem_free(old, old_size);
        pcpu_mem_free(new, new_size);

        return 0;
}

/**
 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 * @chunk: chunk the candidate area belongs to
 * @off: the offset to the start of the candidate area
 * @this_size: the size of the candidate area
 * @size: the size of the target allocation
 * @align: the alignment of the target allocation
 * @pop_only: only allocate from already populated region
 *
 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 * at @off sized @this_size is a candidate.  This function determines
 * whether the target allocation fits in the candidate area and returns the
 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 * is returned.
 *
 * If @pop_only is %true, this function only considers the already
 * populated part of the candidate area.
 */
static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
                            int size, int align, bool pop_only)
{
        int cand_off = off;

        while (true) {
                int head = ALIGN(cand_off, align) - off;
                int page_start, page_end, rs, re;

                if (this_size < head + size)
                        return -1;

                if (!pop_only)
                        return head;

                /*
                 * If the first unpopulated page is beyond the end of the
                 * allocation, the whole allocation is populated;
                 * otherwise, retry from the end of the unpopulated area.
                 */
                page_start = PFN_DOWN(head + off);
                page_end = PFN_UP(head + off + size);

                rs = page_start;
                pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
                if (rs >= page_end)
                        return head;
                cand_off = re * PAGE_SIZE;
        }
}
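
/*
 * Worked example (illustrative): for a candidate area at @off == 100
 * sized 1000 and a request for 256 bytes aligned to 64, the first try
 * pads head = ALIGN(100, 64) - 100 = 28 bytes.  Since 28 + 256 fits in
 * 1000, 28 is returned, unless @pop_only finds an unpopulated page
 * inside the would-be allocation [128, 384), in which case the search
 * restarts past that hole.
 */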

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 * @pop_only: allocate only from the populated area
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
                           bool pop_only, int *occ_pages_p)
{
        int oslot = pcpu_chunk_slot(chunk);
        int max_contig = 0;
        int i, off;
        bool seen_free = false;
        int *p;

        for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
                int head, tail;
                int this_size;

                off = *p;
                if (off & 1)
                        continue;

                this_size = (p[1] & ~1) - off;

                head = pcpu_fit_in_area(chunk, off, this_size, size, align,
                                        pop_only);
                if (head < 0) {
                        if (!seen_free) {
                                chunk->first_free = i;
                                seen_free = true;
                        }
                        max_contig = max(this_size, max_contig);
                        continue;
                }

                /*
                 * If head is small or the previous block is free,
                 * merge'em.  Note that 'small' is defined as smaller
                 * than sizeof(int), which is very small but isn't too
                 * uncommon for percpu allocations.
                 */
                if (head && (head < sizeof(int) || !(p[-1] & 1))) {
                        *p = off += head;
                        if (p[-1] & 1)
                                chunk->free_size -= head;
                        else
                                max_contig = max(*p - p[-1], max_contig);
                        this_size -= head;
                        head = 0;
                }

                /* if tail is small, just keep it around */
                tail = this_size - head - size;
                if (tail < sizeof(int)) {
                        tail = 0;
                        size = this_size - head;
                }

                /* split if warranted */
                if (head || tail) {
                        int nr_extra = !!head + !!tail;

                        /* insert new subblocks */
                        memmove(p + nr_extra + 1, p + 1,
                                sizeof(chunk->map[0]) * (chunk->map_used - i));
                        chunk->map_used += nr_extra;

                        if (head) {
                                if (!seen_free) {
                                        chunk->first_free = i;
                                        seen_free = true;
                                }
                                *++p = off += head;
                                ++i;
                                max_contig = max(head, max_contig);
                        }
                        if (tail) {
                                p[1] = off + size;
                                max_contig = max(tail, max_contig);
                        }
                }

                if (!seen_free)
                        chunk->first_free = i + 1;

                /* update hint and mark allocated */
                if (i + 1 == chunk->map_used)
                        chunk->contig_hint = max_contig; /* fully scanned */
                else
                        chunk->contig_hint = max(chunk->contig_hint,
                                                 max_contig);

                chunk->free_size -= size;
                *p |= 1;

                *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
                pcpu_chunk_relocate(chunk, oslot);
                return off;
        }

        chunk->contig_hint = max_contig;        /* fully scanned */
        pcpu_chunk_relocate(chunk, oslot);

        /* tell the upper layer that this chunk has no matching area */
        return -1;
}
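
/*
 * Illustrative map walk-through (assumes pcpu_unit_size == 32768): a
 * fresh chunk starts with map[] = { 0, 32768|1 } and map_used == 1,
 * i.e. a single free area [0, 32768) terminated by the sentinel entry.
 * Allocating 512 bytes at offset 0 splits it into
 * map[] = { 0|1, 512, 32768|1 }, map_used == 2: [0, 512) is in use
 * (bit 0 set) and [512, 32768) remains free.
 */
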
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 * @occ_pages_p: out param for the number of pages the area occupies
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
                           int *occ_pages_p)
{
        int oslot = pcpu_chunk_slot(chunk);
        int off = 0;
        unsigned i, j;
        int to_free = 0;
        int *p;

        freeme |= 1;    /* we are searching for <given offset, in use> pair */

        i = 0;
        j = chunk->map_used;
        while (i != j) {
                unsigned k = (i + j) / 2;
                off = chunk->map[k];
                if (off < freeme)
                        i = k + 1;
                else if (off > freeme)
                        j = k;
                else
                        i = j = k;
        }
        BUG_ON(off != freeme);

        if (i < chunk->first_free)
                chunk->first_free = i;

        p = chunk->map + i;
        *p = off &= ~1;
        chunk->free_size += (p[1] & ~1) - off;

        *occ_pages_p = pcpu_count_occupied_pages(chunk, i);

        /* merge with next? */
        if (!(p[1] & 1))
                to_free++;
        /* merge with previous? */
        if (i > 0 && !(p[-1] & 1)) {
                to_free++;
                i--;
                p--;
        }
        if (to_free) {
                chunk->map_used -= to_free;
                memmove(p + 1, p + 1 + to_free,
                        (chunk->map_used - i) * sizeof(chunk->map[0]));
        }

        chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
        pcpu_chunk_relocate(chunk, oslot);
}
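
/*
 * Continuing the walk-through above (illustrative): freeing offset 0
 * from map[] = { 0|1, 512, 32768|1 } binary-searches for 0|1, clears
 * bit 0, and, since the following entry at 512 is also free, merges
 * the two areas back into map[] = { 0, 32768|1 }, map_used == 1.
 */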

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
        struct pcpu_chunk *chunk;

        chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
        if (!chunk)
                return NULL;

        chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
                                                sizeof(chunk->map[0]));
        if (!chunk->map) {
                pcpu_mem_free(chunk, pcpu_chunk_struct_size);
                return NULL;
        }

        chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
        chunk->map[0] = 0;
        chunk->map[1] = pcpu_unit_size | 1;
        chunk->map_used = 1;

        INIT_LIST_HEAD(&chunk->list);
        INIT_LIST_HEAD(&chunk->map_extend_list);
        chunk->free_size = pcpu_unit_size;
        chunk->contig_hint = pcpu_unit_size;

        return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
        if (!chunk)
                return;
        pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
        pcpu_mem_free(chunk, pcpu_chunk_struct_size);
}

/**
 * pcpu_chunk_populated - post-population bookkeeping
 * @chunk: pcpu_chunk which got populated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 * the bookkeeping information accordingly.  Must be called after each
 * successful population.
 */
static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
{
        int nr = page_end - page_start;

        lockdep_assert_held(&pcpu_lock);

        bitmap_set(chunk->populated, page_start, nr);
        chunk->nr_populated += nr;
        pcpu_nr_empty_pop_pages += nr;
}

/**
 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 * @chunk: pcpu_chunk which got depopulated
 * @page_start: the start page
 * @page_end: the end page
 *
 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 * Update the bookkeeping information accordingly.  Must be called after
 * each successful depopulation.
 */
static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
                                   int page_start, int page_end)
{
        int nr = page_end - page_start;

        lockdep_assert_held(&pcpu_lock);

        bitmap_clear(chunk->populated, page_start, nr);
        chunk->nr_populated -= nr;
        pcpu_nr_empty_pop_pages -= nr;
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk          - populate the specified range of a chunk
 * pcpu_depopulate_chunk        - depopulate the specified range of a chunk
 * pcpu_create_chunk            - create a new chunk
 * pcpu_destroy_chunk           - destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page            - translate address to the containing page struct
 * pcpu_verify_alloc_info       - check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
        /* is it in the first chunk? */
        if (pcpu_addr_in_first_chunk(addr)) {
                /* is it in the reserved area? */
                if (pcpu_addr_in_reserved_chunk(addr))
                        return pcpu_reserved_chunk;
                return pcpu_first_chunk;
        }

        /*
         * The address is relative to unit0 which might be unused and
         * thus unmapped.  Offset the address to the unit space of the
         * current processor before looking it up in the vmalloc
         * space.  Note that any possible cpu id can be used here, so
         * there's no need to worry about preemption or cpu hotplug.
         */
        addr += pcpu_unit_offsets[raw_smp_processor_id()];
        return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 * @gfp: allocation flags
 *
 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 * contain %GFP_KERNEL, the allocation is atomic.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                                 gfp_t gfp)
{
        static int warn_limit = 10;
        struct pcpu_chunk *chunk;
        const char *err;
        bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
        int occ_pages = 0;
        int slot, off, new_alloc, cpu, ret;
        unsigned long flags;
        void __percpu *ptr;

        /*
         * We want the lowest bit of offset available for in-use/free
         * indicator, so force >= 16bit alignment and make size even.
         */
        if (unlikely(align < 2))
                align = 2;

        size = ALIGN(size, 2);

        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                WARN(true, "illegal size (%zu) or align (%zu) for "
                     "percpu allocation\n", size, align);
                return NULL;
        }

        if (!is_atomic)
                mutex_lock(&pcpu_alloc_mutex);

        spin_lock_irqsave(&pcpu_lock, flags);

        /* serve reserved allocations from the reserved chunk if available */
        if (reserved && pcpu_reserved_chunk) {
                chunk = pcpu_reserved_chunk;

                if (size > chunk->contig_hint) {
                        err = "alloc from reserved chunk failed";
                        goto fail_unlock;
                }

                while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
                        spin_unlock_irqrestore(&pcpu_lock, flags);
                        if (is_atomic ||
                            pcpu_extend_area_map(chunk, new_alloc) < 0) {
                                err = "failed to extend area map of reserved chunk";
                                goto fail;
                        }
                        spin_lock_irqsave(&pcpu_lock, flags);
                }

                off = pcpu_alloc_area(chunk, size, align, is_atomic,
                                      &occ_pages);
                if (off >= 0)
                        goto area_found;

                err = "alloc from reserved chunk failed";
                goto fail_unlock;
        }

restart:
        /* search through normal chunks */
        for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
                list_for_each_entry(chunk, &pcpu_slot[slot], list) {
                        if (size > chunk->contig_hint)
                                continue;

                        new_alloc = pcpu_need_to_extend(chunk, is_atomic);
                        if (new_alloc) {
                                if (is_atomic)
                                        continue;
                                spin_unlock_irqrestore(&pcpu_lock, flags);
                                if (pcpu_extend_area_map(chunk,
                                                         new_alloc) < 0) {
                                        err = "failed to extend area map";
                                        goto fail;
                                }
                                spin_lock_irqsave(&pcpu_lock, flags);
                                /*
                                 * pcpu_lock has been dropped, need to
                                 * restart pcpu_slot list walking.
                                 */
                                goto restart;
                        }

                        off = pcpu_alloc_area(chunk, size, align, is_atomic,
                                              &occ_pages);
                        if (off >= 0)
                                goto area_found;
                }
        }

        spin_unlock_irqrestore(&pcpu_lock, flags);

        /*
         * No space left.  Create a new chunk.  We don't want multiple
         * tasks to create chunks simultaneously.  Serialize and create iff
         * there's still no empty chunk after grabbing the mutex.
         */
        if (is_atomic)
                goto fail;

        if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
                chunk = pcpu_create_chunk();
                if (!chunk) {
                        err = "failed to allocate new chunk";
                        goto fail;
                }

                spin_lock_irqsave(&pcpu_lock, flags);
                pcpu_chunk_relocate(chunk, -1);
        } else {
                spin_lock_irqsave(&pcpu_lock, flags);
        }

        goto restart;

area_found:
        spin_unlock_irqrestore(&pcpu_lock, flags);

        /* populate if not all pages are already there */
        if (!is_atomic) {
                int page_start, page_end, rs, re;

                page_start = PFN_DOWN(off);
                page_end = PFN_UP(off + size);

                pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                        WARN_ON(chunk->immutable);

                        ret = pcpu_populate_chunk(chunk, rs, re);

                        spin_lock_irqsave(&pcpu_lock, flags);
                        if (ret) {
                                pcpu_free_area(chunk, off, &occ_pages);
                                err = "failed to populate";
                                goto fail_unlock;
                        }
                        pcpu_chunk_populated(chunk, rs, re);
                        spin_unlock_irqrestore(&pcpu_lock, flags);
                }

                mutex_unlock(&pcpu_alloc_mutex);
        }

        if (chunk != pcpu_reserved_chunk) {
                spin_lock_irqsave(&pcpu_lock, flags);
                pcpu_nr_empty_pop_pages -= occ_pages;
                spin_unlock_irqrestore(&pcpu_lock, flags);
        }

        if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
                pcpu_schedule_balance_work();

        /* clear the areas and return address relative to base address */
        for_each_possible_cpu(cpu)
                memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

        ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
        kmemleak_alloc_percpu(ptr, size, gfp);
        return ptr;

fail_unlock:
        spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
        if (!is_atomic && warn_limit) {
                pr_warning("PERCPU: allocation failed, size=%zu align=%zu atomic=%d, %s\n",
                           size, align, is_atomic, err);
                dump_stack();
                if (!--warn_limit)
                        pr_info("PERCPU: limit reached, disable warning\n");
        }
        if (is_atomic) {
                /* see the flag handling in pcpu_balance_workfn() */
                pcpu_atomic_alloc_failed = true;
                pcpu_schedule_balance_work();
        } else {
                mutex_unlock(&pcpu_alloc_mutex);
        }
        return NULL;
}

/**
 * __alloc_percpu_gfp - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @gfp: allocation flags
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
 * be called from any context but is a lot more likely to fail.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
        return pcpu_alloc(size, align, false, gfp);
}
EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
        return pcpu_alloc(size, align, false, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
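
/*
 * Usage sketch (illustrative only, not compiled): typical dynamic
 * percpu usage from kernel code.  alloc_percpu() is the type-safe
 * wrapper around __alloc_percpu() declared in linux/percpu.h; the
 * example function name is hypothetical.
 */
#if 0
static int percpu_counter_example(void)
{
        int __percpu *cnt;
        int cpu, total = 0;

        cnt = alloc_percpu(int);        /* zero-filled, sleeps (GFP_KERNEL) */
        if (!cnt)
                return -ENOMEM;

        this_cpu_inc(*cnt);             /* fast update on the local cpu */

        for_each_possible_cpu(cpu)      /* fold all cpus' slots together */
                total += *per_cpu_ptr(cnt, cpu);

        free_percpu(cnt);
        return total;
}
#endif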

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
        return pcpu_alloc(size, align, true, GFP_KERNEL);
}

/**
 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one, service any
 * pending area map extensions, and make sure enough empty pages are
 * populated for atomic allocations.
 */
static void pcpu_balance_workfn(struct work_struct *work)
{
        LIST_HEAD(to_free);
        struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
        struct pcpu_chunk *chunk, *next;
        int slot, nr_to_pop, ret;

        /*
         * There's no reason to keep around multiple unused chunks and VM
         * areas can be scarce.  Destroy all free chunks except for one.
         */
        mutex_lock(&pcpu_alloc_mutex);
        spin_lock_irq(&pcpu_lock);

        list_for_each_entry_safe(chunk, next, free_head, list) {
                WARN_ON(chunk->immutable);

                /* spare the first one */
                if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
                        continue;

                list_del_init(&chunk->map_extend_list);
                list_move(&chunk->list, &to_free);
        }

        spin_unlock_irq(&pcpu_lock);

        list_for_each_entry_safe(chunk, next, &to_free, list) {
                int rs, re;

                pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
                        pcpu_depopulate_chunk(chunk, rs, re);
                        spin_lock_irq(&pcpu_lock);
                        pcpu_chunk_depopulated(chunk, rs, re);
                        spin_unlock_irq(&pcpu_lock);
                }
                pcpu_destroy_chunk(chunk);
        }

        /* service chunks which requested async area map extension */
        do {
                int new_alloc = 0;

                spin_lock_irq(&pcpu_lock);

                chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
                                        struct pcpu_chunk, map_extend_list);
                if (chunk) {
                        list_del_init(&chunk->map_extend_list);
                        new_alloc = pcpu_need_to_extend(chunk, false);
                }

                spin_unlock_irq(&pcpu_lock);

                if (new_alloc)
                        pcpu_extend_area_map(chunk, new_alloc);
        } while (chunk);

        /*
         * Ensure there are a certain number of free populated pages for
         * atomic allocs.  Fill up from the most packed so that atomic
         * allocs don't increase fragmentation.  If atomic allocation
         * failed previously, always populate the maximum amount.  This
         * should prevent atomic allocs larger than PAGE_SIZE from keeping
         * failing indefinitely; however, large atomic allocs are not
         * something we support properly and can be highly unreliable and
         * inefficient.
         */
retry_pop:
        if (pcpu_atomic_alloc_failed) {
                nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
                /* best effort anyway, don't worry about synchronization */
                pcpu_atomic_alloc_failed = false;
        } else {
                nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
                                  pcpu_nr_empty_pop_pages,
                                  0, PCPU_EMPTY_POP_PAGES_HIGH);
        }

        for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
                int nr_unpop = 0, rs, re;

                if (!nr_to_pop)
                        break;

                spin_lock_irq(&pcpu_lock);
                list_for_each_entry(chunk, &pcpu_slot[slot], list) {
                        nr_unpop = pcpu_unit_pages - chunk->nr_populated;
                        if (nr_unpop)
                                break;
                }
                spin_unlock_irq(&pcpu_lock);

                if (!nr_unpop)
                        continue;

                /* @chunk can't go away while pcpu_alloc_mutex is held */
                pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
                        int nr = min(re - rs, nr_to_pop);

                        ret = pcpu_populate_chunk(chunk, rs, rs + nr);
                        if (!ret) {
                                nr_to_pop -= nr;
                                spin_lock_irq(&pcpu_lock);
                                pcpu_chunk_populated(chunk, rs, rs + nr);
                                spin_unlock_irq(&pcpu_lock);
                        } else {
                                nr_to_pop = 0;
                        }

                        if (!nr_to_pop)
                                break;
                }
        }

        if (nr_to_pop) {
                /* ran out of chunks to populate, create a new one and retry */
                chunk = pcpu_create_chunk();
                if (chunk) {
                        spin_lock_irq(&pcpu_lock);
                        pcpu_chunk_relocate(chunk, -1);
                        spin_unlock_irq(&pcpu_lock);
                        goto retry_pop;
                }
        }

        mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
        void *addr;
        struct pcpu_chunk *chunk;
        unsigned long flags;
        int off, occ_pages;

        if (!ptr)
                return;

        kmemleak_free_percpu(ptr);

        addr = __pcpu_ptr_to_addr(ptr);

        spin_lock_irqsave(&pcpu_lock, flags);

        chunk = pcpu_chunk_addr_search(addr);
        off = addr - chunk->base_addr;

        pcpu_free_area(chunk, off, &occ_pages);

        if (chunk != pcpu_reserved_chunk)
                pcpu_nr_empty_pop_pages += occ_pages;

        /* if there is more than one fully free chunk, wake up the grim reaper */
        if (chunk->free_size == pcpu_unit_size) {
                struct pcpu_chunk *pos;

                list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
                        if (pos != chunk) {
                                pcpu_schedule_balance_work();
                                break;
                        }
        }

        spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
        const size_t static_size = __per_cpu_end - __per_cpu_start;
        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                void *start = per_cpu_ptr(base, cpu);

                if ((void *)addr >= start && (void *)addr < start + static_size)
                        return true;
        }
#endif
        /* on UP, can't distinguish from other static vars, always false */
        return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of the
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * The percpu allocator has special setup for the first chunk, which
 * currently supports either embedding in the linear address space or
 * vmalloc mapping; from the second chunk on, the backing allocator
 * (currently either vm or km) provides the translation.
 *
 * The address could be translated without checking whether it falls into
 * the first chunk, but the current code reflects better how the percpu
 * allocator actually works, and the verification can discover both bugs
 * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
 * So the current code is kept.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
        void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
        bool in_first_chunk = false;
        unsigned long first_low, first_high;
        unsigned int cpu;

        /*
         * The following test on unit_low/high isn't strictly
         * necessary but will speed up lookups of addresses which
         * aren't in the first chunk.
         */
        first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
        first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
                                     pcpu_unit_pages);
        if ((unsigned long)addr >= first_low &&
            (unsigned long)addr < first_high) {
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(base, cpu);

                        if (addr >= start && addr < start + pcpu_unit_size) {
                                in_first_chunk = true;
                                break;
                        }
                }
        }

        if (in_first_chunk) {
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
                        return page_to_phys(vmalloc_to_page(addr)) +
                               offset_in_page(addr);
        } else
                return page_to_phys(pcpu_addr_to_page(addr)) +
                       offset_in_page(addr);
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                                                      int nr_units)
{
        struct pcpu_alloc_info *ai;
        size_t base_size, ai_size;
        void *ptr;
        int unit;

        base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
                          __alignof__(ai->groups[0].cpu_map[0]));
        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

        ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
        if (!ptr)
                return NULL;
        ai = ptr;
        ptr += base_size;

        ai->groups[0].cpu_map = ptr;

        for (unit = 0; unit < nr_units; unit++)
                ai->groups[0].cpu_map[unit] = NR_CPUS;

        ai->nr_groups = nr_groups;
        ai->__ai_size = PFN_ALIGN(ai_size);

        return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
        memblock_free_early(__pa(ai), ai->__ai_size);
}
1432
1433/**
1434 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1435 * @lvl: loglevel
1436 * @ai: allocation info to dump
1437 *
1438 * Print out information about @ai using loglevel @lvl.
1439 */
1440static void pcpu_dump_alloc_info(const char *lvl,
1441                                 const struct pcpu_alloc_info *ai)
1442{
1443        int group_width = 1, cpu_width = 1, width;
1444        char empty_str[] = "--------";
1445        int alloc = 0, alloc_end = 0;
1446        int group, v;
1447        int upa, apl;   /* units per alloc, allocs per line */
1448
1449        v = ai->nr_groups;
1450        while (v /= 10)
1451                group_width++;
1452
1453        v = num_possible_cpus();
1454        while (v /= 10)
1455                cpu_width++;
1456        empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1457
1458        upa = ai->alloc_size / ai->unit_size;
1459        width = upa * (cpu_width + 1) + group_width + 3;
1460        apl = rounddown_pow_of_two(max(60 / width, 1));
1461
1462        printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1463               lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1464               ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1465
1466        for (group = 0; group < ai->nr_groups; group++) {
1467                const struct pcpu_group_info *gi = &ai->groups[group];
1468                int unit = 0, unit_end = 0;
1469
1470                BUG_ON(gi->nr_units % upa);
1471                for (alloc_end += gi->nr_units / upa;
1472                     alloc < alloc_end; alloc++) {
1473                        if (!(alloc % apl)) {
1474                                printk(KERN_CONT "\n");
1475                                printk("%spcpu-alloc: ", lvl);
1476                        }
1477                        printk(KERN_CONT "[%0*d] ", group_width, group);
1478
1479                        for (unit_end += upa; unit < unit_end; unit++)
1480                                if (gi->cpu_map[unit] != NR_CPUS)
1481                                        printk(KERN_CONT "%0*d ", cpu_width,
1482                                               gi->cpu_map[unit]);
1483                                else
1484                                        printk(KERN_CONT "%s ", empty_str);
1485                }
1486        }
1487        printk(KERN_CONT "\n");
1488}
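
/*
 * For illustration only: on a hypothetical 4-cpu UMA machine the dump
 * comes out roughly as
 *
 *   pcpu-alloc: s8192 r8192 d28672 u65536 alloc=1*2097152
 *   pcpu-alloc: [0] 0 1 2 3 -- -- ...
 *
 * i.e. static/reserved/dynamic/unit sizes and the allocation shape on
 * the first line, then one [group] entry per allocation listing the
 * cpu behind each unit, with empty_str ("--") for unused units.
 */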

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from the arch percpu
 * area setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of the static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the number of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies the unit size and must be aligned to
 * PAGE_SIZE and equal to or larger than @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and is used as the
 * alignment for vm areas.
 *
 * @ai->alloc_size is the allocation size and always a multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe the virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                                  void *base_addr)
{
        static char cpus_buf[4096] __initdata;
        static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
        static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
        size_t dyn_size = ai->dyn_size;
        size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
        struct pcpu_chunk *schunk, *dchunk = NULL;
        unsigned long *group_offsets;
        size_t *group_sizes;
        unsigned long *unit_off;
        unsigned int cpu;
        int *unit_map;
        int group, unit, i;

        cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond) do {                                    \
        if (unlikely(cond)) {                                           \
                pr_emerg("PERCPU: failed to initialize, %s\n", #cond);  \
                pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);   \
                pcpu_dump_alloc_info(KERN_EMERG, ai);                   \
                BUG();                                                  \
        }                                                               \
} while (0)

        /* sanity checks */
        PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
        PCPU_SETUP_BUG_ON(!ai->static_size);
        PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
#endif
        PCPU_SETUP_BUG_ON(!base_addr);
        PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
        PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
        PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
        PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
        PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
        PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

        /* process group information and build config tables accordingly */
        group_offsets = memblock_virt_alloc(ai->nr_groups *
                                             sizeof(group_offsets[0]), 0);
        group_sizes = memblock_virt_alloc(ai->nr_groups *
                                           sizeof(group_sizes[0]), 0);
        unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
        unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;

        pcpu_low_unit_cpu = NR_CPUS;
        pcpu_high_unit_cpu = NR_CPUS;

        for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
                const struct pcpu_group_info *gi = &ai->groups[group];

                group_offsets[group] = gi->base_offset;
                group_sizes[group] = gi->nr_units * ai->unit_size;

                for (i = 0; i < gi->nr_units; i++) {
                        cpu = gi->cpu_map[i];
                        if (cpu == NR_CPUS)
                                continue;

                        /* unit_map/unit_off only have nr_cpu_ids entries */
                        PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
                        PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
                        PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

                        unit_map[cpu] = unit + i;
                        unit_off[cpu] = gi->base_offset + i * ai->unit_size;

                        /* determine low/high unit_cpu */
                        if (pcpu_low_unit_cpu == NR_CPUS ||
                            unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
                                pcpu_low_unit_cpu = cpu;
                        if (pcpu_high_unit_cpu == NR_CPUS ||
                            unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
                                pcpu_high_unit_cpu = cpu;
                }
        }
        pcpu_nr_units = unit;

        for_each_possible_cpu(cpu)
                PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

        /* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
        pcpu_dump_alloc_info(KERN_DEBUG, ai);

        pcpu_nr_groups = ai->nr_groups;
        pcpu_group_offsets = group_offsets;
        pcpu_group_sizes = group_sizes;
        pcpu_unit_map = unit_map;
        pcpu_unit_offsets = unit_off;

        /* determine basic parameters */
        pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
        pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
        pcpu_atom_size = ai->atom_size;
        pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
                BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

        /*
         * Allocate chunk slots.  The additional last slot is for
         * empty chunks.
         */
        pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
        pcpu_slot = memblock_virt_alloc(
                        pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
        for (i = 0; i < pcpu_nr_slots; i++)
                INIT_LIST_HEAD(&pcpu_slot[i]);

        /*
         * Initialize static chunk.  If reserved_size is zero, the
         * static chunk covers static area + dynamic allocation area
         * in the first chunk.  If reserved_size is not zero, it
         * covers static area + reserved area (mostly used for module
         * static percpu allocation).
         */
        schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
        INIT_LIST_HEAD(&schunk->list);
        INIT_LIST_HEAD(&schunk->map_extend_list);
        schunk->base_addr = base_addr;
        schunk->map = smap;
        schunk->map_alloc = ARRAY_SIZE(smap);
        schunk->immutable = true;
        bitmap_fill(schunk->populated, pcpu_unit_pages);
        schunk->nr_populated = pcpu_unit_pages;

        if (ai->reserved_size) {
                schunk->free_size = ai->reserved_size;
                pcpu_reserved_chunk = schunk;
                pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
        } else {
                schunk->free_size = dyn_size;
                dyn_size = 0;                   /* dynamic area covered */
        }
        schunk->contig_hint = schunk->free_size;

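        /*
         * Seed the static chunk's allocation map.  Each map entry holds
         * the start offset of an area; bit 0 set marks that area as
         * allocated, and the final entry doubles as the end sentinel.
         * With illustrative sizes, static_size = 0x3000 followed by
         * 0x1000 of free space would read:
         *
         *   map[0] = 0x0000 | 1     static area, in use
         *   map[1] = 0x3000         free area
         *   map[2] = 0x4000 | 1     end sentinel
         */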
        schunk->map[0] = 1;
        schunk->map[1] = ai->static_size;
        schunk->map_used = 1;
        if (schunk->free_size)
                schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
        else
                schunk->map[1] |= 1;

        /* init dynamic chunk if necessary */
        if (dyn_size) {
                dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
                INIT_LIST_HEAD(&dchunk->list);
                INIT_LIST_HEAD(&dchunk->map_extend_list);
                dchunk->base_addr = base_addr;
                dchunk->map = dmap;
                dchunk->map_alloc = ARRAY_SIZE(dmap);
                dchunk->immutable = true;
                bitmap_fill(dchunk->populated, pcpu_unit_pages);
                dchunk->nr_populated = pcpu_unit_pages;

                dchunk->contig_hint = dchunk->free_size = dyn_size;
                dchunk->map[0] = 1;
                dchunk->map[1] = pcpu_reserved_chunk_limit;
                dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
                dchunk->map_used = 2;
        }

        /* link the first chunk in */
        pcpu_first_chunk = dchunk ?: schunk;
        pcpu_nr_empty_pop_pages +=
                pcpu_count_occupied_pages(pcpu_first_chunk, 1);
        pcpu_chunk_relocate(pcpu_first_chunk, -1);

        /* we're done */
        pcpu_base_addr = base_addr;
        return 0;
}
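
/*
 * A minimal caller (sketch only; the UP stub near the bottom of this
 * file is the real-life equivalent) would look roughly like:
 *
 *      ai = pcpu_alloc_alloc_info(1, 1);
 *      base = memblock_virt_alloc(ai->unit_size, PAGE_SIZE);
 *      ... fill in ai->*_size and ai->groups[0], copy static data ...
 *      rc = pcpu_setup_first_chunk(ai, base);
 */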

#ifdef CONFIG_SMP

const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
        [PCPU_FC_AUTO]  = "auto",
        [PCPU_FC_EMBED] = "embed",
        [PCPU_FC_PAGE]  = "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (0)
                /* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
        else if (!strcmp(str, "embed"))
                pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
        else if (!strcmp(str, "page"))
                pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
        else
                pr_warning("PERCPU: unknown allocator %s specified\n", str);

        return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
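
/*
 * e.g. booting with "percpu_alloc=page" on a kernel built with
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK selects the page-mapped first
 * chunk; an unrecognized value only warns and leaves pcpu_chosen_fc
 * at its PCPU_FC_AUTO default.
 */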

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if it's needed by the arch config or if the generic setup
 * is going to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
        !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes in different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
                                size_t reserved_size, size_t dyn_size,
                                size_t atom_size,
                                pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
        static int group_map[NR_CPUS] __initdata;
        static int group_cnt[NR_CPUS] __initdata;
        const size_t static_size = __per_cpu_end - __per_cpu_start;
        int nr_groups = 1, nr_units = 0;
        size_t size_sum, min_unit_size, alloc_size;
        int upa, max_upa, uninitialized_var(best_upa);  /* units_per_alloc */
        int last_allocs, group, unit;
        unsigned int cpu, tcpu;
        struct pcpu_alloc_info *ai;
        unsigned int *cpu_map;

        /* this function may be called multiple times */
        memset(group_map, 0, sizeof(group_map));
        memset(group_cnt, 0, sizeof(group_cnt));

        /* calculate size_sum and ensure dyn_size is enough for early alloc */
        size_sum = PFN_ALIGN(static_size + reserved_size +
                            max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
        dyn_size = size_sum - static_size - reserved_size;

        /*
         * Determine min_unit_size, alloc_size and max_upa such that
         * alloc_size is a multiple of atom_size and is the smallest
         * which can accommodate 4k aligned segments which are equal to
         * or larger than min_unit_size.
         */
        min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

        alloc_size = roundup(min_unit_size, atom_size);
        upa = alloc_size / min_unit_size;
        while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
                upa--;
        max_upa = upa;
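
        /*
         * Illustrative numbers: with atom_size = 2MB and min_unit_size
         * = 68KB, alloc_size = 2MB and the scan above starts at upa =
         * 30 and settles on upa = 16, the largest value that divides
         * 2MB into page-aligned units of at least 68KB, i.e. sixteen
         * 128KB units per 2MB allocation.
         */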

        /* group cpus according to their proximity */
        for_each_possible_cpu(cpu) {
                group = 0;
        next_group:
                for_each_possible_cpu(tcpu) {
                        if (cpu == tcpu)
                                break;
                        if (group_map[tcpu] == group && cpu_distance_fn &&
                            (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
                             cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
                                group++;
                                nr_groups = max(nr_groups, group + 1);
                                goto next_group;
                        }
                }
                group_map[cpu] = group;
                group_cnt[group]++;
        }
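
        /*
         * e.g. (hypothetical) a two-node machine with cpus 0-1 on node
         * 0 and cpus 2-3 on node 1, where the cross-node distance
         * exceeds LOCAL_DISTANCE, ends up with group_map = {0, 0, 1, 1}
         * and group_cnt = {2, 2}.
         */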

        /*
         * Expand unit size until address space usage goes over 75%
         * and then as much as possible without using more address
         * space.
         */
        last_allocs = INT_MAX;
        for (upa = max_upa; upa; upa--) {
                int allocs = 0, wasted = 0;

                if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
                        continue;

                for (group = 0; group < nr_groups; group++) {
                        int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
                        allocs += this_allocs;
                        wasted += this_allocs * upa - group_cnt[group];
                }

                /*
                 * Don't accept if wastage is over 1/3.  The
                 * greater-than comparison ensures upa==1 always
                 * passes the following check.
                 */
                if (wasted > num_possible_cpus() / 3)
                        continue;

                /* and then don't consume more memory */
                if (allocs > last_allocs)
                        break;
                last_allocs = allocs;
                best_upa = upa;
        }
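
        /*
         * Worked example (illustrative): 4 possible cpus split as
         * group_cnt = {3, 1}.  upa = 2 gives allocs = 2 + 1 = 3 and
         * wasted = 1 + 1 = 2 unused units, which exceeds 4/3 and is
         * rejected; upa = 1 wastes nothing and wins.
         */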
        upa = best_upa;

        /* allocate and fill alloc_info */
        for (group = 0; group < nr_groups; group++)
                nr_units += roundup(group_cnt[group], upa);

        ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
        if (!ai)
                return ERR_PTR(-ENOMEM);
        cpu_map = ai->groups[0].cpu_map;

        for (group = 0; group < nr_groups; group++) {
                ai->groups[group].cpu_map = cpu_map;
                cpu_map += roundup(group_cnt[group], upa);
        }

        ai->static_size = static_size;
        ai->reserved_size = reserved_size;
        ai->dyn_size = dyn_size;
        ai->unit_size = alloc_size / upa;
        ai->atom_size = atom_size;
        ai->alloc_size = alloc_size;

        for (group = 0, unit = 0; group_cnt[group]; group++) {
                struct pcpu_group_info *gi = &ai->groups[group];

                /*
                 * Initialize base_offset as if all groups are located
                 * back-to-back.  The caller should update this to
                 * reflect actual allocation.
                 */
                gi->base_offset = unit * ai->unit_size;

                for_each_possible_cpu(cpu)
                        if (group_map[cpu] == group)
                                gi->cpu_map[gi->nr_units++] = cpu;
                gi->nr_units = roundup(gi->nr_units, upa);
                unit += gi->nr_units;
        }
        BUG_ON(unit != nr_units);

        return ai;
}
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up the embedded first percpu chunk
 * and can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to set up the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into the
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggyback on the linear physical
 * mapping which often uses larger page sizes.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines, thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than the distances
 * between node memory addresses (i.e. 32-bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                                  size_t atom_size,
                                  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
                                  pcpu_fc_alloc_fn_t alloc_fn,
                                  pcpu_fc_free_fn_t free_fn)
{
        void *base = (void *)ULONG_MAX;
        void **areas = NULL;
        struct pcpu_alloc_info *ai;
        size_t size_sum, areas_size, max_distance;
        int group, i, rc;

        ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
                                   cpu_distance_fn);
        if (IS_ERR(ai))
                return PTR_ERR(ai);

        size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
        areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

        areas = memblock_virt_alloc_nopanic(areas_size, 0);
        if (!areas) {
                rc = -ENOMEM;
                goto out_free;
        }

        /* allocate, copy and determine base address */
        for (group = 0; group < ai->nr_groups; group++) {
                struct pcpu_group_info *gi = &ai->groups[group];
                unsigned int cpu = NR_CPUS;
                void *ptr;

                for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
                        cpu = gi->cpu_map[i];
                BUG_ON(cpu == NR_CPUS);

                /* allocate space for the whole group */
                ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
                if (!ptr) {
                        rc = -ENOMEM;
                        goto out_free_areas;
                }
                /* kmemleak tracks the percpu allocations separately */
                kmemleak_free(ptr);
                areas[group] = ptr;

                base = min(ptr, base);
        }

        /*
         * Copy data and free unused parts.  This should happen after all
         * allocations are complete; otherwise, we may end up with
         * overlapping groups.
         */
        for (group = 0; group < ai->nr_groups; group++) {
                struct pcpu_group_info *gi = &ai->groups[group];
                void *ptr = areas[group];

                for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
                        if (gi->cpu_map[i] == NR_CPUS) {
                                /* unused unit, free whole */
                                free_fn(ptr, ai->unit_size);
                                continue;
                        }
                        /* copy and return the unused part */
                        memcpy(ptr, __per_cpu_load, ai->static_size);
                        free_fn(ptr + size_sum, ai->unit_size - size_sum);
                }
        }

        /* base address is now known, determine group base offsets */
        max_distance = 0;
        for (group = 0; group < ai->nr_groups; group++) {
                ai->groups[group].base_offset = areas[group] - base;
                max_distance = max_t(size_t, max_distance,
                                     ai->groups[group].base_offset);
        }
        max_distance += ai->unit_size;
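
        /*
         * e.g. (hypothetical) two NUMA groups whose bootmem blocks land
         * 1GB apart need roughly 1GB of contiguous vmalloc space to
         * cover max_distance; on a 32-bit box with a ~128MB vmalloc
         * area the check below fires, and where the page allocator is
         * built in we fail so the caller can fall back to it.
         */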

        /* warn if maximum distance is further than 75% of vmalloc space */
        if (max_distance > VMALLOC_TOTAL * 3 / 4) {
                pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
                           "space 0x%lx\n", max_distance,
                           VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
                /* and fail if we have fallback */
                rc = -EINVAL;
                goto out_free;
#endif
        }

        pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
                PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
                ai->dyn_size, ai->unit_size);

        rc = pcpu_setup_first_chunk(ai, base);
        goto out_free;

out_free_areas:
        for (group = 0; group < ai->nr_groups; group++)
                if (areas[group])
                        free_fn(areas[group],
                                ai->groups[group].nr_units * ai->unit_size);
out_free:
        pcpu_free_alloc_info(ai);
        if (areas)
                memblock_free_early(__pa(areas), areas_size);
        return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up a page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page into the vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
                                 pcpu_fc_alloc_fn_t alloc_fn,
                                 pcpu_fc_free_fn_t free_fn,
                                 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
        static struct vm_struct vm;
        struct pcpu_alloc_info *ai;
        char psize_str[16];
        int unit_pages;
        size_t pages_size;
        struct page **pages;
        int unit, i, j, rc;

        snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

        ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
        if (IS_ERR(ai))
                return PTR_ERR(ai);
        BUG_ON(ai->nr_groups != 1);
        BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

        unit_pages = ai->unit_size >> PAGE_SHIFT;

        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
                               sizeof(pages[0]));
        pages = memblock_virt_alloc(pages_size, 0);

        /* allocate pages */
        j = 0;
        for (unit = 0; unit < num_possible_cpus(); unit++)
                for (i = 0; i < unit_pages; i++) {
                        unsigned int cpu = ai->groups[0].cpu_map[unit];
                        void *ptr;

                        ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
                        if (!ptr) {
                                pr_warning("PERCPU: failed to allocate %s page "
                                           "for cpu%u\n", psize_str, cpu);
                                goto enomem;
                        }
                        /* kmemleak tracks the percpu allocations separately */
                        kmemleak_free(ptr);
                        pages[j++] = virt_to_page(ptr);
                }

        /* allocate vm area, map the pages and copy static data */
        vm.flags = VM_ALLOC;
        vm.size = num_possible_cpus() * ai->unit_size;
        vm_area_register_early(&vm, PAGE_SIZE);

        for (unit = 0; unit < num_possible_cpus(); unit++) {
                unsigned long unit_addr =
                        (unsigned long)vm.addr + unit * ai->unit_size;

                for (i = 0; i < unit_pages; i++)
                        populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

                /* pte already populated, the following shouldn't fail */
                rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
                                      unit_pages);
                if (rc < 0)
                        panic("failed to map percpu area, err=%d\n", rc);

                /*
                 * FIXME: Archs with virtual cache should flush local
                 * cache for the linear mapping here - something
                 * equivalent to flush_cache_vmap() on the local cpu.
                 * flush_cache_vmap() can't be used as most supporting
                 * data structures are not set up yet.
                 */

                /* copy static data */
                memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
        }

        /* we're ready, commit */
        pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
                unit_pages, psize_str, vm.addr, ai->static_size,
                ai->reserved_size, ai->dyn_size);

        rc = pcpu_setup_first_chunk(ai, vm.addr);
        goto out_free_ar;

enomem:
        while (--j >= 0)
                free_fn(page_address(pages[j]), PAGE_SIZE);
        rc = -ENOMEM;
out_free_ar:
        memblock_free_early(__pa(pages), pages_size);
        pcpu_free_alloc_info(ai);
        return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggyback
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
                                       size_t align)
{
        return memblock_virt_alloc_from_nopanic(
                        size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
        memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
        unsigned long delta;
        unsigned int cpu;
        int rc;

        /*
         * Always reserve area for module percpu variables.  That's
         * what the legacy allocator did.
         */
        rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
                                    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
                                    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
        if (rc < 0)
                panic("Failed to initialize percpu areas.");

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu)
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
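
/*
 * The offsets computed above are what per_cpu() arithmetic is built
 * on: conceptually, &per_cpu(var, cpu) resolves to
 *
 *      (typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu])
 *
 * so each cpu's copy of the static area sits at a fixed delta from
 * the reference copy in the kernel image.
 */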
#endif  /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else   /* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses the km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
        const size_t unit_size =
                roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
                                         PERCPU_DYNAMIC_RESERVE));
        struct pcpu_alloc_info *ai;
        void *fc;

        ai = pcpu_alloc_alloc_info(1, 1);
        fc = memblock_virt_alloc_from_nopanic(unit_size,
                                              PAGE_SIZE,
                                              __pa(MAX_DMA_ADDRESS));
        if (!ai || !fc)
                panic("Failed to allocate memory for percpu areas.");
        /* kmemleak tracks the percpu allocations separately */
        kmemleak_free(fc);

        ai->dyn_size = unit_size;
        ai->unit_size = unit_size;
        ai->atom_size = unit_size;
        ai->alloc_size = unit_size;
        ai->groups[0].nr_units = 1;
        ai->groups[0].cpu_map[0] = 0;

        if (pcpu_setup_first_chunk(ai, fc) < 0)
                panic("Failed to initialize percpu areas.");
}

#endif  /* CONFIG_SMP */

/*
 * The first and reserved chunks are initialized with temporary
 * allocation maps in initdata so that they can be used before slab is
 * online.  This function is called after slab is brought up and
 * replaces those with properly allocated maps.
 */
void __init percpu_init_late(void)
{
        struct pcpu_chunk *target_chunks[] =
                { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
        struct pcpu_chunk *chunk;
        unsigned long flags;
        int i;

        for (i = 0; (chunk = target_chunks[i]); i++) {
                int *map;
                const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

                BUILD_BUG_ON(size > PAGE_SIZE);

                map = pcpu_mem_zalloc(size);
                BUG_ON(!map);

                spin_lock_irqsave(&pcpu_lock, flags);
                memcpy(map, chunk->map, size);
                chunk->map = map;
                spin_unlock_irqrestore(&pcpu_lock, flags);
        }
}

/*
 * The percpu allocator is initialized early during boot when neither
 * slab nor workqueue is available.  Plug async management until
 * everything is up and running.
 */
static int __init percpu_enable_async(void)
{
        pcpu_async_enabled = true;
        return 0;
}
subsys_initcall(percpu_enable_async);