+/*
+ * axfs_free_region - release the backing memory of a region descriptor.
+ *
+ * XIP regions point directly into the mapped image, so their virt_addr
+ * was never allocated and must not be freed.  For all other regions the
+ * buffer came from vmalloc() (see axfs_fill_region_data()).
+ */
+static void axfs_free_region(struct axfs_super *sbi,
+			     struct axfs_region_desc *region)
+{
+	if (!region)
+		return;
+
+	if (AXFS_IS_REGION_XIP(sbi, region))
+		return;
+
+	/* vfree(NULL) is a no-op, so no NULL check is needed. */
+	vfree(region->virt_addr);
+}
+
+/*
+ * axfs_put_sbi - tear down the in-core superblock info.
+ *
+ * Frees the uid/gid region buffers, the optional second-device name
+ * string and both compressed-block staging buffers.
+ *
+ * NOTE(review): the original hunk was truncated before the closing
+ * brace; restored here.  Freeing of @sbi itself is assumed to be the
+ * caller's responsibility — confirm against the call sites.
+ */
+static void axfs_put_sbi(struct axfs_super *sbi)
+{
+	if (!sbi)
+		return;
+
+	axfs_free_region(sbi, &sbi->uids);
+	axfs_free_region(sbi, &sbi->gids);
+
+	/* kfree(NULL)/vfree(NULL) are no-ops; no guards needed. */
+	kfree(sbi->second_dev);
+	vfree(sbi->cblock_buffer[0]);
+	vfree(sbi->cblock_buffer[1]);
+}
+
+/*
+ * axfs_copy_mem - copy @len bytes at image offset @fsoffset into @buf.
+ *
+ * The image is directly addressable at sbi->virt_start_addr, so this is
+ * a plain memcpy() from the mapped image.  Always returns 0.
+ */
+static int axfs_copy_mem(struct super_block *sb, void *buf, u64 fsoffset,
+			 u64 len)
+{
+	struct axfs_super *sbi = AXFS_SB(sb);
+	void *src = (void *)(sbi->virt_start_addr + (unsigned long)fsoffset);
+
+	memcpy(buf, src, (size_t)len);
+	return 0;
+}
+
+/*
+ * axfs_copy_metadata - read @len bytes of on-media metadata at @fsoffset
+ * into @buf, choosing the access method per region.
+ *
+ * The image is split at sbi->mmap_size: bytes below it may be directly
+ * mmapable, bytes at/above it live on the backing MTD/block device.  A
+ * request straddling the boundary is split in two and each half is
+ * handled by a recursive call.
+ *
+ * Returns 0 on success or a negative errno from the copy helper.
+ */
+static int axfs_copy_metadata(struct super_block *sb, void *buf, u64 fsoffset,
+ u64 len)
+{
+ struct axfs_super *sbi = AXFS_SB(sb);
+ u64 end = fsoffset + len;
+ /* a/b: byte counts below/above the mmap boundary; bb: dest for the
+  * upper half.  Only meaningful when the request actually straddles
+  * sbi->mmap_size (a would underflow otherwise, but is unused then). */
+ u64 a = sbi->mmap_size - fsoffset;
+ u64 b = end - sbi->mmap_size;
+ void *bb = (void *)((unsigned long)buf + (unsigned long)a);
+ int err;
+
+ /* Catches case where sbi is not yet fully initialized. */
+ if ((sbi->magic == 0) && (sbi->virt_start_addr != 0))
+ return axfs_copy_mem(sb, buf, fsoffset, len);
+
+ if (fsoffset < sbi->mmap_size) {
+ if (end > sbi->mmap_size) {
+ /* Straddles the boundary: copy each half separately. */
+ err = axfs_copy_metadata(sb, buf, fsoffset, a);
+ if (err)
+ return err;
+ err = axfs_copy_metadata(sb, bb, sbi->mmap_size, b);
+ } else {
+ if (AXFS_IS_OFFSET_MMAPABLE(sbi, fsoffset)) {
+ err = axfs_copy_mem(sb, buf, fsoffset, len);
+ } else if (AXFS_HAS_MTD(sb)) {
+ err = axfs_copy_mtd(sb, buf, fsoffset, len);
+ } else if (AXFS_HAS_BDEV(sb)) {
+ err = axfs_copy_block(sb, buf, fsoffset, len);
+ } else {
+ err = -EINVAL;
+ }
+ }
+ } else {
+ /* NOTE(review): above the boundary BDEV is preferred over MTD —
+  * the opposite order of the branch above.  Presumably intentional
+  * (the non-mmapable tail normally lives on the block device);
+  * confirm before unifying. */
+ if (AXFS_NODEV(sb)) {
+ err = axfs_copy_mem(sb, buf, fsoffset, len);
+ } else if (AXFS_HAS_BDEV(sb)) {
+ err = axfs_copy_block(sb, buf, fsoffset, len);
+ } else if (AXFS_HAS_MTD(sb)) {
+ err = axfs_copy_mtd(sb, buf, fsoffset, len);
+ } else {
+ err = -EINVAL;
+ }
+ }
+ return err;
+}
+
+/*
+ * axfs_fill_region_data - make a region's data addressable in memory.
+ *
+ * XIP-able, uncompressed regions are simply pointed into the mapped
+ * image (unless @force demands an in-core copy for a region that spills
+ * past the mmapable window).  Everything else is vmalloc'd and filled,
+ * decompressing through a bounce buffer when the region is compressed.
+ *
+ * Returns 0 on success or a negative errno; on failure region->virt_addr
+ * is left NULL.
+ *
+ * NOTE(review): the original hunk referenced sbi/end/size/fsoffset/etc.
+ * with no visible declarations; the locals are restored here from the
+ * region descriptor fields — confirm field names against the header.
+ */
+static int axfs_fill_region_data(struct super_block *sb,
+				 struct axfs_region_desc *region, int force)
+{
+	struct axfs_super *sbi = AXFS_SB(sb);
+	unsigned long addr;
+	void *buff = NULL;
+	void *vaddr;
+	int err = -ENOMEM;
+	u64 size = region->size;
+	u64 fsoffset = region->fsoffset;
+	u64 end = fsoffset + size;
+	u64 c_size = region->compressed_size;
+
+	if (size == 0)
+		return 0;
+
+	if (AXFS_IS_REGION_INCORE(region))
+		goto incore;
+
+	if (AXFS_IS_REGION_COMPRESSED(region))
+		goto incore;
+
+	if (AXFS_IS_REGION_XIP(sbi, region)) {
+		/* A forced in-core copy is needed if the region runs past
+		 * the directly-mmapable window. */
+		if ((end > sbi->mmap_size) && (force))
+			goto incore;
+		addr = sbi->virt_start_addr;
+		addr += (unsigned long)fsoffset;
+		region->virt_addr = (void *)addr;
+		return 0;
+	}
+
+incore:
+	region->virt_addr = vmalloc(size);
+	if (!region->virt_addr)
+		goto out;
+	vaddr = region->virt_addr;
+
+	if (AXFS_IS_REGION_COMPRESSED(region)) {
+		buff = vmalloc(c_size);
+		if (!buff)
+			goto out;
+		axfs_copy_metadata(sb, buff, fsoffset, c_size);
+		/* axfs_uncompress_block() returns the uncompressed byte
+		 * count, 0 on failure; returning the raw 0 here would
+		 * falsely report success after freeing virt_addr, so map
+		 * it to a real error code. */
+		err = axfs_uncompress_block(vaddr, size, buff, c_size);
+		if (!err) {
+			err = -EINVAL;
+			goto out;
+		}
+		vfree(buff);
+	} else {
+		axfs_copy_metadata(sb, vaddr, fsoffset, size);
+	}
+
+	return 0;
+
+out:
+	/* vfree(NULL) is a no-op; clear virt_addr so later teardown
+	 * (axfs_free_region) cannot double-free a dangling pointer. */
+	vfree(buff);
+	vfree(region->virt_addr);
+	region->virt_addr = NULL;
+	return err;
+}
+
+
+/*
+ * axfs_get_onmedia_super - read and validate the on-media superblock,
+ * then populate the in-core axfs_super from it (converting from the
+ * big-endian on-media layout).
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on a bad
+ * magic/signature, or an error from the metadata read / region setup.
+ */
+static int axfs_get_onmedia_super(struct super_block *sb)
+{
+	int err;
+	struct axfs_super *sbi = AXFS_SB(sb);
+	struct axfs_super_onmedia *sbo;
+
+	sbo = kmalloc(sizeof(*sbo), GFP_KERNEL);
+	if (!sbo)
+		return -ENOMEM;
+
+	/* A failed read must not fall through to validating garbage. */
+	err = axfs_copy_metadata(sb, (void *)sbo, 0, sizeof(*sbo));
+	if (err)
+		goto out;
+
+	/* Do sanity checks on the superblock */
+	if (be32_to_cpu(sbo->magic) != AXFS_MAGIC) {
+		printk(KERN_ERR "axfs: wrong magic\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* verify the signature is correct */
+	if (strncmp(sbo->signature, AXFS_SIGNATURE, sizeof(AXFS_SIGNATURE))) {
+		/* Bound the print: the on-media signature field is not
+		 * guaranteed to be NUL-terminated. */
+		printk(KERN_ERR "axfs: wrong axfs signature,"
+		       " read %.*s, expected %s\n",
+		       (int)sizeof(sbo->signature), sbo->signature,
+		       AXFS_SIGNATURE);
+		err = -EINVAL;
+		goto out;
+	}
+
+	sbi->magic = be32_to_cpu(sbo->magic);
+	sbi->version_major = sbo->version_major;
+	sbi->version_minor = sbo->version_minor;
+	sbi->version_sub = sbo->version_sub;
+	sbi->files = be64_to_cpu(sbo->files);
+	sbi->size = be64_to_cpu(sbo->size);
+	sbi->blocks = be64_to_cpu(sbo->blocks);
+	sbi->mmap_size = be64_to_cpu(sbo->mmap_size);
+	sbi->cblock_size = be32_to_cpu(sbo->cblock_size);
+	sbi->timestamp.tv_sec = be64_to_cpu(sbo->timestamp);
+	sbi->timestamp.tv_nsec = 0;
+	sbi->compression_type = sbo->compression_type;
+
+	err = axfs_set_compression_type(sbi);
+	if (err)
+		goto out;
+
+	/* If no block or MTD device, adjust mmapable to cover all image */
+	if (AXFS_NODEV(sb))
+		sbi->mmap_size = sbi->size;
+
+	err = axfs_fill_region_descriptors(sb, sbo);
+	if (err)
+		goto out;
+
+	err = 0;
+out:
+	kfree(sbo);
+	return err;
+}
+