source: src/linux/universal/linux-3.5/drivers/mtd/mtdpart.c @ 20115

Last change on this file since 20115 was 20115, checked in by BrainSlayer, 4 years ago

update kernels

File size: 27.6 KB
Line 
1/*
2 * Simple MTD partitioning layer
3 *
4 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
5 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
6 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/kernel.h>
27#include <linux/slab.h>
28#include <linux/list.h>
29#include <linux/kmod.h>
30#include <linux/mtd/mtd.h>
31#include <linux/mtd/partitions.h>
32#include <linux/root_dev.h>
33#include <linux/magic.h>
34#include <linux/err.h>
35
36#define MTD_ERASE_PARTIAL       0x8000 /* partition only covers parts of an erase block */
37
38#include "mtdcore.h"
39
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);		/* every slave mtd_part, for all masters */
static DEFINE_MUTEX(mtd_partitions_mutex);	/* guards mtd_partitions */
43
44/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;		/* the slave MTD object handed to users; must stay first (see PART()) */
	struct mtd_info *master;	/* underlying whole-chip MTD device */
	uint64_t offset;		/* partition start, bytes from master start */
	struct list_head list;		/* link in the global mtd_partitions list */
};
51
52/*
53 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
54 * the pointer to that structure with this macro.
55 */
#define PART(x)  ((struct mtd_part *)(x))
/* An mtd_info belongs to the partition layer iff its read op is ours. */
#define IS_PART(mtd) (mtd->_read == part_read)
58
59/*
60 * MTD methods which simply translate the effective address and pass through
61 * to the _real_ device.
62 */
63
/*
 * Read from a partition: shift the offset into the master's address space
 * and attribute any new ECC failures/corrections to this partition.
 */
static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	/* Snapshot master stats so only the delta from this read is counted. */
	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}
82
83static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
84                size_t *retlen, void **virt, resource_size_t *phys)
85{
86        struct mtd_part *part = PART(mtd);
87
88        return part->master->_point(part->master, from + part->offset, len,
89                                    retlen, virt, phys);
90}
91
92static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
93{
94        struct mtd_part *part = PART(mtd);
95
96        return part->master->_unpoint(part->master, from + part->offset, len);
97}
98
99static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
100                                            unsigned long len,
101                                            unsigned long offset,
102                                            unsigned long flags)
103{
104        struct mtd_part *part = PART(mtd);
105
106        offset += part->offset;
107        return part->master->_get_unmapped_area(part->master, len, offset,
108                                                flags);
109}
110
/*
 * Read OOB (and optionally main-area) data from the partition.  Rejects
 * requests that would run past the end of the partition, then delegates to
 * the master and folds any bitflip/ECC result into this partition's stats.
 */
static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;	/* only the free OOB bytes per page */
		else
			len = mtd->oobsize;	/* raw OOB bytes per page */
		/* number of whole write-size pages left from 'from' to the end */
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}
148
149static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
150                size_t len, size_t *retlen, u_char *buf)
151{
152        struct mtd_part *part = PART(mtd);
153        return part->master->_read_user_prot_reg(part->master, from, len,
154                                                 retlen, buf);
155}
156
157static int part_get_user_prot_info(struct mtd_info *mtd,
158                struct otp_info *buf, size_t len)
159{
160        struct mtd_part *part = PART(mtd);
161        return part->master->_get_user_prot_info(part->master, buf, len);
162}
163
164static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
165                size_t len, size_t *retlen, u_char *buf)
166{
167        struct mtd_part *part = PART(mtd);
168        return part->master->_read_fact_prot_reg(part->master, from, len,
169                                                 retlen, buf);
170}
171
172static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
173                size_t len)
174{
175        struct mtd_part *part = PART(mtd);
176        return part->master->_get_fact_prot_info(part->master, buf, len);
177}
178
179static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
180                size_t *retlen, const u_char *buf)
181{
182        struct mtd_part *part = PART(mtd);
183        return part->master->_write(part->master, to + part->offset, len,
184                                    retlen, buf);
185}
186
187static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
188                size_t *retlen, const u_char *buf)
189{
190        struct mtd_part *part = PART(mtd);
191        return part->master->_panic_write(part->master, to + part->offset, len,
192                                          retlen, buf);
193}
194
195static int part_write_oob(struct mtd_info *mtd, loff_t to,
196                struct mtd_oob_ops *ops)
197{
198        struct mtd_part *part = PART(mtd);
199
200
201        if (to >= mtd->size)
202                return -EINVAL;
203        if (ops->datbuf && to + ops->len > mtd->size)
204                return -EINVAL;
205        return part->master->_write_oob(part->master, to + part->offset, ops);
206}
207
208static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
209                size_t len, size_t *retlen, u_char *buf)
210{
211        struct mtd_part *part = PART(mtd);
212        return part->master->_write_user_prot_reg(part->master, from, len,
213                                                  retlen, buf);
214}
215
216static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
217                size_t len)
218{
219        struct mtd_part *part = PART(mtd);
220        return part->master->_lock_user_prot_reg(part->master, from, len);
221}
222
223static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
224                unsigned long count, loff_t to, size_t *retlen)
225{
226        struct mtd_part *part = PART(mtd);
227        return part->master->_writev(part->master, vecs, count,
228                                     to + part->offset, retlen);
229}
230
/*
 * Erase a range of the partition.  When the partition is not aligned to the
 * master's erase blocks (MTD_ERASE_PARTIAL flag), the partially covered
 * head or tail block is first read into instr->erase_buf so that the
 * out-of-partition bytes can be written back by mtd_erase_callback() once
 * the block has been erased.
 */
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->partial_start = false;
	if (mtd->flags & MTD_ERASE_PARTIAL) {
		size_t readlen = 0;
		u64 mtd_ofs;

		/* GFP_ATOMIC: erase may be issued from a non-sleeping context */
		instr->erase_buf = kmalloc(part->master->erasesize, GFP_ATOMIC);
		if (!instr->erase_buf)
			return -ENOMEM;

		/* offset of the erase start within its master erase block */
		mtd_ofs = part->offset + instr->addr;
		instr->erase_buf_ofs = do_div(mtd_ofs, part->master->erasesize);

		if (instr->erase_buf_ofs > 0) {
			/* unaligned start: snapshot the whole first block */
			instr->addr -= instr->erase_buf_ofs;
			ret = part->master->_read(part->master,
				instr->addr + part->offset,
				part->master->erasesize,
				&readlen, instr->erase_buf);

			instr->partial_start = true;
		} else {
			/* aligned start: check whether the partition END is
			 * unaligned and snapshot the last block if so */
			mtd_ofs = part->offset + part->mtd.size;
			instr->erase_buf_ofs = part->master->erasesize -
				do_div(mtd_ofs, part->master->erasesize);

			if (instr->erase_buf_ofs > 0) {
				instr->len += instr->erase_buf_ofs;
				ret = part->master->_read(part->master,
					part->offset + instr->addr +
					instr->len - part->master->erasesize,
					part->master->erasesize, &readlen,
					instr->erase_buf);
			} else {
				ret = 0;
			}
		}
		if (ret < 0) {
			kfree(instr->erase_buf);
			return ret;
		}

	}

	/* translate into master address space and delegate */
	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		/* on failure, map addresses back to partition-relative */
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
		if (mtd->flags & MTD_ERASE_PARTIAL)
			kfree(instr->erase_buf);
	}

	return ret;
}
291
/*
 * Completion hook for erase requests.  For partition erases this writes
 * back the data saved by part_erase() for a partially covered erase block,
 * converts master addresses back into partition-relative ones, and finally
 * invokes the caller's own callback (if any).
 */
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);
		size_t wrlen = 0;

		if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
			if (instr->partial_start) {
				/* restore the bytes in front of the partition */
				part->master->_write(part->master,
					instr->addr, instr->erase_buf_ofs,
					&wrlen, instr->erase_buf);
				instr->addr += instr->erase_buf_ofs;
			} else {
				/* restore the bytes behind the partition end */
				instr->len -= instr->erase_buf_ofs;
				part->master->_write(part->master,
					instr->addr + instr->len,
					instr->erase_buf_ofs, &wrlen,
					instr->erase_buf +
					part->master->erasesize -
					instr->erase_buf_ofs);
			}
			kfree(instr->erase_buf);
		}
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
323
324static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
325{
326        struct mtd_part *part = PART(mtd);
327        return part->master->_lock(part->master, ofs + part->offset, len);
328}
329
330static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
331{
332        struct mtd_part *part = PART(mtd);
333        return part->master->_unlock(part->master, ofs + part->offset, len);
334}
335
336static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
337{
338        struct mtd_part *part = PART(mtd);
339        return part->master->_is_locked(part->master, ofs + part->offset, len);
340}
341
342static void part_sync(struct mtd_info *mtd)
343{
344        struct mtd_part *part = PART(mtd);
345        part->master->_sync(part->master);
346}
347
348static int part_suspend(struct mtd_info *mtd)
349{
350        struct mtd_part *part = PART(mtd);
351        return part->master->_suspend(part->master);
352}
353
354static void part_resume(struct mtd_info *mtd)
355{
356        struct mtd_part *part = PART(mtd);
357        part->master->_resume(part->master);
358}
359
360static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
361{
362        struct mtd_part *part = PART(mtd);
363        ofs += part->offset;
364        return part->master->_block_isbad(part->master, ofs);
365}
366
367static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
368{
369        struct mtd_part *part = PART(mtd);
370        int res;
371
372        ofs += part->offset;
373        res = part->master->_block_markbad(part->master, ofs);
374        if (!res)
375                mtd->ecc_stats.badblocks++;
376        return res;
377}
378
/* Release a slave node: first the name allocated by allocate_partition()
 * (kstrdup), then the node itself. */
static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}
384
/*
 * Unregister and destroy all slave MTD objects which are attached to the
 * given master MTD object.  Slaves whose device deletion fails (e.g. still
 * in use) stay on the list; the last such error is returned, 0 when every
 * slave was removed.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;	/* remember failure, keep going */
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}
410
/*
 * Build (but do not register) one slave mtd_part for @part on @master.
 * @partno is the partition's index (used for log messages and to decide
 * which partition forwards suspend/resume); @cur_offset is where the
 * previous partition ended, used to resolve the special OFS_APPEND /
 * OFS_NXTBLK / OFS_RETAIN offset values.  Returns the new slave or an
 * ERR_PTR on allocation failure.  The caller is responsible for adding
 * the slave to mtd_partitions and registering its device.
 */
static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;

	/* install pass-through ops; optional ones only when the master has them */
	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
	/* only the first partition of a class-less master forwards PM ops */
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
			slave->mtd._suspend = part_suspend;
			slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	/* resolve the symbolic offset requests */
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		/* take all remaining space except part->size bytes at the end */
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase regions which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* small unaligned partitions stay writable via the
		 * MTD_ERASE_PARTIAL read-modify-erase path; larger ones
		 * are forced read-only */
		slave->mtd.flags |= MTD_ERASE_PARTIAL;
		if (((u32) slave->mtd.size) > master->erasesize)
			slave->mtd.flags &= ~MTD_WRITEABLE;
		else
			slave->mtd.erasesize = slave->mtd.size;
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset + slave->mtd.size, &slave->mtd)) {
		/* same treatment when the END is unaligned */
		slave->mtd.flags |= MTD_ERASE_PARTIAL;

		if ((u32) slave->mtd.size > master->erasesize)
			slave->mtd.flags &= ~MTD_WRITEABLE;
		else
			slave->mtd.erasesize = slave->mtd.size;
	}
	if ((slave->mtd.flags & (MTD_ERASE_PARTIAL|MTD_WRITEABLE)) == MTD_ERASE_PARTIAL)
		printk(KERN_WARNING"mtd: partition \"%s\" must either start or end on erase block boundary or be smaller than an erase block -- forcing read-only\n",
				part->name);

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	/* pre-count bad blocks that fall inside this partition */
	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}
610
611int mtd_add_partition(struct mtd_info *master, char *name,
612                      long long offset, long long length)
613{
614        struct mtd_partition part;
615        struct mtd_part *p, *new;
616        uint64_t start, end;
617        int ret = 0;
618
619        /* the direct offset is expected */
620        if (offset == MTDPART_OFS_APPEND ||
621            offset == MTDPART_OFS_NXTBLK)
622                return -EINVAL;
623
624        if (length == MTDPART_SIZ_FULL)
625                length = master->size - offset;
626
627        if (length <= 0)
628                return -EINVAL;
629
630        part.name = name;
631        part.size = length;
632        part.offset = offset;
633        part.mask_flags = 0;
634        part.ecclayout = NULL;
635
636        new = allocate_partition(master, &part, -1, offset);
637        if (IS_ERR(new))
638                return PTR_ERR(new);
639
640        start = offset;
641        end = offset + length;
642
643        mutex_lock(&mtd_partitions_mutex);
644        list_for_each_entry(p, &mtd_partitions, list)
645                if (p->master == master) {
646                        if ((start >= p->offset) &&
647                            (start < (p->offset + p->mtd.size)))
648                                goto err_inv;
649
650                        if ((end >= p->offset) &&
651                            (end < (p->offset + p->mtd.size)))
652                                goto err_inv;
653                }
654
655        list_add(&new->list, &mtd_partitions);
656        mutex_unlock(&mtd_partitions_mutex);
657
658        add_mtd_device(&new->mtd);
659
660        return ret;
661err_inv:
662        mutex_unlock(&mtd_partitions_mutex);
663        free_partition(new);
664        return -EINVAL;
665}
666EXPORT_SYMBOL_GPL(mtd_add_partition);
667
/*
 * Delete the partition with device index @partno that belongs to @master.
 * Returns 0 on success, -EINVAL if no such partition exists, or the
 * negative error returned by del_mtd_device().
 */
int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
690
691#ifdef CONFIG_MTD_ROOTFS_SPLIT
692#define ROOTFS_SPLIT_NAME "rootfs_data"
693#define ROOTFS_REMOVED_NAME "<removed>"
694
/* Minimal little-endian view of a squashfs superblock: only the magic and
 * the filesystem's on-flash size are needed here. */
struct squashfs_super_block {
	__le32 s_magic;
	__le32 pad0[9];		/* skip ahead to bytes_used */
	__le64 bytes_used;
};
700
701
702static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
703{
704        struct squashfs_super_block sb;
705        int len, ret;
706
707        ret = mtd_read(master, offset, sizeof(sb), &len, (void *) &sb);
708        if (ret || (len != sizeof(sb))) {
709                printk(KERN_ALERT "split_squashfs: error occured while reading "
710                        "from \"%s\"\n", master->name);
711                return -EINVAL;
712        }
713
714        if (SQUASHFS_MAGIC != le32_to_cpu(sb.s_magic) ) {
715                printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
716                        master->name);
717                *split_offset = 0;
718                return 0;
719        }
720
721        if (le64_to_cpu((sb.bytes_used)) <= 0) {
722                printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
723                        master->name);
724                *split_offset = 0;
725                return 0;
726        }
727
728        len = (u32) le64_to_cpu(sb.bytes_used);
729        len += (offset & 0x000fffff);
730        len +=  (master->erasesize - 1);
731        len &= ~(master->erasesize - 1);
732        len -= (offset & 0x000fffff);
733        *split_offset = offset + len;
734
735        return 0;
736}
737
738static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, const struct mtd_partition *part)
739{
740        struct mtd_partition *dpart;
741        struct mtd_part *slave = NULL;
742        struct mtd_part *spart;
743        int ret, split_offset = 0;
744
745        spart = PART(rpart);
746        ret = split_squashfs(master, spart->offset, &split_offset);
747        if (ret)
748                return ret;
749
750        if (split_offset <= 0)
751                return 0;
752
753        dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
754        if (dpart == NULL) {
755                printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
756                        ROOTFS_SPLIT_NAME);
757                return -ENOMEM;
758        }
759
760        memcpy(dpart, part, sizeof(*part));
761        dpart->name = (unsigned char *)&dpart[1];
762        strcpy(dpart->name, ROOTFS_SPLIT_NAME);
763
764        dpart->size = rpart->size - (split_offset - spart->offset);
765        dpart->offset = split_offset;
766
767        if (dpart == NULL)
768                return 1;
769
770        printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%llX, len=%llX \n",
771                ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
772
773        slave = allocate_partition(master, dpart, 0, split_offset);
774        if (IS_ERR(slave))
775                return PTR_ERR(slave);
776        mutex_lock(&mtd_partitions_mutex);
777        list_add(&slave->list, &mtd_partitions);
778        mutex_unlock(&mtd_partitions_mutex);
779
780        add_mtd_device(&slave->mtd);
781
782        rpart->split = &slave->mtd;
783
784        return 0;
785}
786
787static int refresh_rootfs_split(struct mtd_info *mtd)
788{
789        struct mtd_partition tpart;
790        struct mtd_part *part;
791        char *name;
792        //int index = 0;
793        int offset, size;
794        int ret;
795
796        part = PART(mtd);
797
798        /* check for the new squashfs offset first */
799        ret = split_squashfs(part->master, part->offset, &offset);
800        if (ret)
801                return ret;
802
803        if ((offset > 0) && !mtd->split) {
804                printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
805                /* if we don't have a rootfs split partition, create a new one */
806                tpart.name = (char *) mtd->name;
807                tpart.size = mtd->size;
808                tpart.offset = part->offset;
809
810                return split_rootfs_data(part->master, &part->mtd, &tpart);
811        } else if ((offset > 0) && mtd->split) {
812                /* update the offsets of the existing partition */
813                size = mtd->size + part->offset - offset;
814
815                part = PART(mtd->split);
816                part->offset = offset;
817                part->mtd.size = size;
818                printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
819                        __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
820                        (u32) part->offset, (u32) part->mtd.size);
821                name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
822                strcpy(name, ROOTFS_SPLIT_NAME);
823                part->mtd.name = name;
824        } else if ((offset <= 0) && mtd->split) {
825                printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
826
827                /* mark existing partition as removed */
828                part = PART(mtd->split);
829                name = kmalloc(sizeof(ROOTFS_SPLIT_NAME) + 1, GFP_KERNEL);
830                strcpy(name, ROOTFS_REMOVED_NAME);
831                part->mtd.name = name;
832                part->offset = 0;
833                part->mtd.size = 0;
834        }
835
836        return 0;
837}
838#endif /* CONFIG_MTD_ROOTFS_SPLIT */
839
840
841
842/*
843 * This function, given a master MTD object and a partition table, creates
844 * and registers slave MTD objects which are bound to the master according to
845 * the partition definitions.
846 *
847 * We don't register the master, or expect the caller to have done so,
848 * for reasons of data integrity.
849 */
850
851int add_mtd_partitions(struct mtd_info *master,
852                       const struct mtd_partition *parts,
853                       int nbparts)
854{
855        struct mtd_part *slave;
856        uint64_t cur_offset = 0;
857        int i;
858#ifdef CONFIG_MTD_ROOTFS_SPLIT
859        int ret;
860#endif
861
862        printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
863
864        for (i = 0; i < nbparts; i++) {
865                slave = allocate_partition(master, parts + i, i, cur_offset);
866                if (IS_ERR(slave))
867                        return PTR_ERR(slave);
868
869                mutex_lock(&mtd_partitions_mutex);
870                list_add(&slave->list, &mtd_partitions);
871                mutex_unlock(&mtd_partitions_mutex);
872
873                add_mtd_device(&slave->mtd);
874
875                if (!strcmp(parts[i].name, "rootfs")) {
876#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
877                        if (ROOT_DEV == 0) {
878                                printk(KERN_NOTICE "mtd: partition \"rootfs\" "
879                                        "set to be root filesystem\n");
880                                ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
881                        }
882#endif
883#ifdef CONFIG_MTD_ROOTFS_SPLIT
884                        ret = split_rootfs_data(master, &slave->mtd, &parts[i]);
885                        /* if (ret == 0)
886                         *      j++; */
887#endif
888                }
889
890                cur_offset = slave->offset + slave->mtd.size;
891        }
892
893        return 0;
894}
895
896int refresh_mtd_partitions(struct mtd_info *mtd)
897{
898        int ret = 0;
899
900        if (IS_PART(mtd)) {
901                struct mtd_part *part;
902                struct mtd_info *master;
903
904                part = PART(mtd);
905                master = part->master;
906                if (master->refresh_device)
907                        ret = master->refresh_device(master);
908        }
909
910        if (!ret && mtd->refresh_device)
911                ret = mtd->refresh_device(mtd);
912
913#ifdef CONFIG_MTD_ROOTFS_SPLIT
914        if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
915                refresh_rootfs_split(mtd);
916#endif
917
918        return 0;
919}
920EXPORT_SYMBOL_GPL(refresh_mtd_partitions);
921
922static DEFINE_SPINLOCK(part_parser_lock);
923static LIST_HEAD(part_parsers);
924
925static struct mtd_part_parser *get_partition_parser(const char *name)
926{
927        struct mtd_part_parser *p, *ret = NULL;
928
929        spin_lock(&part_parser_lock);
930
931        list_for_each_entry(p, &part_parsers, list)
932                if (!strcmp(p->name, name) && try_module_get(p->owner)) {
933                        ret = p;
934                        break;
935                }
936
937        spin_unlock(&part_parser_lock);
938
939        return ret;
940}
941
942#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
943
/**
 * register_mtd_parser - add a partition parser to the global parser list
 * @p: parser to register; must remain valid until deregistered
 *
 * The list is protected by part_parser_lock. Always returns 0.
 */
int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);
953
/**
 * deregister_mtd_parser - remove a partition parser from the global list
 * @p: parser previously passed to register_mtd_parser()
 *
 * The list is protected by part_parser_lock. Always returns 0.
 */
int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
962
963/*
964 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
965 * are changing this array!
966 */
static const char *default_mtd_part_types[] = {
	"cmdlinepart",	/* partitions given on the kernel command line */
	"ofpart",	/* partitions from the device tree / OF node */
	NULL		/* sentinel: parse_mtd_partitions() stops here */
};
972
973/**
974 * parse_mtd_partitions - parse MTD partitions
975 * @master: the master partition (describes whole MTD device)
976 * @types: names of partition parsers to try or %NULL
977 * @pparts: array of partitions found is returned here
978 * @data: MTD partition parser-specific data
979 *
980 * This function tries to find partition on MTD device @master. It uses MTD
981 * partition parsers, specified in @types. However, if @types is %NULL, then
982 * the default list of parsers is used. The default list contains only the
983 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
985 * partitions parsed out by the first parser.
986 *
987 * This function may return:
988 * o a negative error code in case of failure
989 * o zero if no partitions were found
990 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
992 */
993int parse_mtd_partitions(struct mtd_info *master, const char **types,
994                         struct mtd_partition **pparts,
995                         struct mtd_part_parser_data *data)
996{
997        struct mtd_part_parser *parser;
998        int ret = 0;
999
1000        if (!types)
1001                types = default_mtd_part_types;
1002
1003        for ( ; ret <= 0 && *types; types++) {
1004                parser = get_partition_parser(*types);
1005                if (!parser && !request_module("%s", *types))
1006                        parser = get_partition_parser(*types);
1007                if (!parser)
1008                        continue;
1009                ret = (*parser->parse_fn)(master, pparts, data);
1010                put_partition_parser(parser);
1011                if (ret > 0) {
1012                        printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
1013                               ret, parser->name, master->name);
1014                        break;
1015                }
1016        }
1017        return ret;
1018}
1019
1020int mtd_is_partition(struct mtd_info *mtd)
1021{
1022        struct mtd_part *part;
1023        int ispart = 0;
1024
1025        mutex_lock(&mtd_partitions_mutex);
1026        list_for_each_entry(part, &mtd_partitions, list)
1027                if (&part->mtd == mtd) {
1028                        ispart = 1;
1029                        break;
1030                }
1031        mutex_unlock(&mtd_partitions_mutex);
1032
1033        return ispart;
1034}
1035EXPORT_SYMBOL_GPL(mtd_is_partition);
Note: See TracBrowser for help on using the repository browser.