Re: [PATCH 08/10] Add yaffs2 file system: core guts code

From: Ryan Mallon
Date: Wed Feb 09 2011 - 21:27:01 EST


On 02/09/2011 04:26 PM, Charles Manning wrote:
> Signed-off-by: Charles Manning <cdhmanning@xxxxxxxxx>

Hi Charles,

More comments below. The cache "locking" thing needs to be sorted.

~Ryan

> ---
> fs/yaffs2/yaffs_guts.c | 5001 ++++++++++++++++++++++++++++++++++++++++++++++++
> fs/yaffs2/yaffs_guts.h | 938 +++++++++
> 2 files changed, 5939 insertions(+), 0 deletions(-)
> create mode 100644 fs/yaffs2/yaffs_guts.c
> create mode 100644 fs/yaffs2/yaffs_guts.h
>
> diff --git a/fs/yaffs2/yaffs_guts.c b/fs/yaffs2/yaffs_guts.c
> new file mode 100644
> index 0000000..403ce11
> --- /dev/null
> +++ b/fs/yaffs2/yaffs_guts.c
> @@ -0,0 +1,5001 @@
> +/*
> + * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
> + *
> + * Copyright (C) 2002-2011 Aleph One Ltd.
> + * for Toby Churchill Ltd and Brightstar Engineering
> + *
> + * Created by Charles Manning <charles@xxxxxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include "yportenv.h"
> +#include "yaffs_trace.h"
> +
> +#include "yaffs_guts.h"
> +#include "yaffs_tagsvalidity.h"
> +#include "yaffs_getblockinfo.h"
> +#include "yaffs_tagscompat.h"
> +#include "yaffs_nand.h"
> +#include "yaffs_yaffs1.h"
> +#include "yaffs_yaffs2.h"
> +#include "yaffs_bitmap.h"
> +#include "yaffs_verify.h"
> +#include "yaffs_nand.h"
> +#include "yaffs_packedtags2.h"
> +#include "yaffs_nameval.h"
> +#include "yaffs_allocator.h"
> +#include "yaffs_attribs.h"
> +
> +/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
> +#define YAFFS_GC_GOOD_ENOUGH 2
> +#define YAFFS_GC_PASSIVE_THRESHOLD 4
> +
> +#include "yaffs_ecc.h"
> +
> +/* Forward declarations */
> +
> +static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
> + const u8 *buffer, int n_bytes, int use_reserve);
> +
> +
> +
> +/* Function to calculate chunk and offset */
> +
> +static inline void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
> + int *chunk_out, u32 *offset_out)
> +{

Possibly should drop inline from this function as it is quite big.

> + int chunk;
> + u32 offset;
> +
> + chunk = (u32) (addr >> dev->chunk_shift);
> +
> + if (dev->chunk_div == 1) {
> + /* easy power of 2 case */
> + offset = (u32) (addr & dev->chunk_mask);

You shouldn't need an explicit cast here (or below).
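i.e. just:

	offset = addr & dev->chunk_mask;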

> + } else {
> + /* Non power-of-2 case */
> +
> + loff_t chunk_base;
> +
> + chunk /= dev->chunk_div;
> +
> + chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
> + offset = (u32) (addr - chunk_base);
> + }
> +
> + *chunk_out = chunk;
> + *offset_out = offset;
> +}
> +
> +/* Function to return the number of shifts for a power of 2 greater than or
> + * equal to the given number
> + * Note we don't try to cater for all possible numbers and this does not have to
> + * be hellishly efficient.
> + */
> +
> +static inline u32 calc_shifts_ceiling(u32 x)
> +{
> + int extra_bits;
> + int shifts;
> +
> + shifts = extra_bits = 0;
> +
> + while (x > 1) {
> + if (x & 1)
> + extra_bits++;
> + x >>= 1;
> + shifts++;
> + }
> +
> + if (extra_bits)
> + shifts++;
> +
> + return shifts;
> +}
> +
> +/* Function to return the number of shifts to get a 1 in bit 0
> + */
> +
> +static inline u32 calc_shifts(u32 x)
> +{
> + u32 shifts;
> +
> + shifts = 0;
> +
> + if (!x)
> + return 0;
> +
> + while (!(x & 1)) {
> + x >>= 1;
> + shifts++;
> + }
> +
> + return shifts;
> +}
> +
> +/*
> + * Temporary buffer manipulations.
> + */
> +
> +static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
> +{
> + int i;
> + u8 *buf = (u8 *) 1;

This looks suspect.

> +
> + memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
> +
> + for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
> + dev->temp_buffer[i].line = 0; /* not in use */
> + dev->temp_buffer[i].buffer = buf =
> + kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);

The assignment to buf is sneakily hidden in this statement. This
function should also free any buffers it has allocated if it fails.
Something like this:

static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
{
	int i;

	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		dev->temp_buffer[i].line = 0;
		dev->temp_buffer[i].buffer =
			kmalloc(dev->param.total_bytes_per_chunk,
				GFP_NOFS);
		if (!dev->temp_buffer[i].buffer)
			goto fail;
	}

	return YAFFS_OK;

fail:
	while (--i >= 0)
		kfree(dev->temp_buffer[i].buffer);
	return YAFFS_FAIL;
}

> + }
> +
> + return buf ? YAFFS_OK : YAFFS_FAIL;
> +}
> +
> +u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev, int line_no)
> +{
> + int i;
> + int j;
> +
> + dev->temp_in_use++;

Should this get incremented even if the allocation fails?

> + if (dev->temp_in_use > dev->max_temp)
> + dev->max_temp = dev->temp_in_use;

If dev->temp_in_use >= dev->max_temp then is the for loop below still
worth entering or can we just jump over it knowing that there are no
free buffers?

> +
> + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
> + if (dev->temp_buffer[i].line == 0) {

if (dev->temp_buffer[i].line != 0)
	continue;

Then drop the level of indentation.

> + dev->temp_buffer[i].line = line_no;
> + if ((i + 1) > dev->max_temp) {
> + dev->max_temp = i + 1;
> + for (j = 0; j <= i; j++)
> + dev->temp_buffer[j].max_line =
> + dev->temp_buffer[j].line;
> + }
> +
> + return dev->temp_buffer[i].buffer;
> + }
> + }
> +
> + yaffs_trace(YAFFS_TRACE_BUFFERS,
> + "Out of temp buffers at line %d, other held by lines:",
> + line_no);
> + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
> + yaffs_trace(YAFFS_TRACE_BUFFERS,
> + " %d", dev->temp_buffer[i].line);
> +
> + /*
> + * If we got here then we have to allocate an unmanaged one
> + * This is not good.
> + */
> +
> + dev->unmanaged_buffer_allocs++;
> + return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
> +
> +}
> +
> +void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer, int line_no)
> +{
> + int i;
> +
> + dev->temp_in_use--;
> +
> + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
> + if (dev->temp_buffer[i].buffer == buffer) {
> + dev->temp_buffer[i].line = 0;
> + return;
> + }
> + }
> +
> + if (buffer) {
> + /* assume it is an unmanaged one. */
> + yaffs_trace(YAFFS_TRACE_BUFFERS,
> + "Releasing unmanaged temp buffer in line %d",
> + line_no);
> + kfree(buffer);
> + dev->unmanaged_buffer_deallocs++;
> + }
> +
> +}
> +
> +/*
> + * Determine if we have a managed buffer.
> + */
> +int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 *buffer)
> +{
> + int i;
> +
> + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
> + if (dev->temp_buffer[i].buffer == buffer)
> + return 1;
> + }
> +
> + for (i = 0; i < dev->param.n_caches; i++) {
> + if (dev->cache[i].data == buffer)
> + return 1;
> + }
> +
> + if (buffer == dev->checkpt_buffer)
> + return 1;
> +
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "yaffs: unmaged buffer detected.");

Typo "unmaged".

> + return 0;
> +}
> +
> +/*
> + * Functions for robustisizing TODO
> + *
> + */
> +
> +static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
> + const u8 *data,
> + const struct yaffs_ext_tags *tags)
> +{
> + dev = dev;
> + nand_chunk = nand_chunk;
> + data = data;
> + tags = tags;

Remove this function, it doesn't do anything useful.

> +}
> +
> +static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
> + const struct yaffs_ext_tags *tags)
> +{
> + dev = dev;
> + nand_chunk = nand_chunk;
> + tags = tags;

and this one.

> +}
> +
> +void yaffs_handle_chunk_error(struct yaffs_dev *dev,
> + struct yaffs_block_info *bi)
> +{
> + if (!bi->gc_prioritise) {

if (bi->gc_prioritise)
	return;

Drop indentation.

> + bi->gc_prioritise = 1;
> + dev->has_pending_prioritised_gc = 1;
> + bi->chunk_error_strikes++;
> +
> + if (bi->chunk_error_strikes > 3) {
> + bi->needs_retiring = 1; /* Too many stikes, so retire */
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "yaffs: Block struck out");
> +
> + }
> + }
> +}
> +
> +static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
> + int erased_ok)
> +{
> + int flash_block = nand_chunk / dev->param.chunks_per_block;
> + struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
> +
> + yaffs_handle_chunk_error(dev, bi);
> +
> + if (erased_ok) {
> + /* Was an actual write failure,
> + * so mark the block for retirement.*/
> + bi->needs_retiring = 1;
> + yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
> + "**>> Block %d needs retiring", flash_block);
> + }
> +
> + /* Delete the chunk */
> + yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
> + yaffs_skip_rest_of_block(dev);
> +}
> +
> +/*
> + * Verification code
> + */
> +
> +/*
> + * Simple hash function. Needs to have a reasonable spread
> + */
> +
> +static inline int yaffs_hash_fn(int n)
> +{
> + n = abs(n);
> + return n % YAFFS_NOBJECT_BUCKETS;
> +}
> +
> +/*
> + * Access functions to useful fake objects.
> + * Note that root might have a presence in NAND if permissions are set.
> + */
> +
> +struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
> +{
> + return dev->root_dir;
> +}
> +
> +struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
> +{
> + return dev->lost_n_found;
> +}
> +
> +/*
> + * Erased NAND checking functions
> + */
> +
> +int yaffs_check_ff(u8 *buffer, int n_bytes)
> +{
> + /* Horrible, slow implementation */
> + while (n_bytes--) {
> + if (*buffer != 0xff)
> + return 0;
> + buffer++;
> + }
> + return 1;
> +}

Maybe something like:

/* Add more 0xffs to taste */
static u8 ff_buf[] = {0xff, 0xff, 0xff, 0xff};

int yaffs_check_ff(u8 *buffer, int n_bytes)
{
	int pos = 0;

	while (pos < n_bytes) {
		int len = min_t(int, sizeof(ff_buf), n_bytes - pos);

		if (memcmp(buffer + pos, ff_buf, len) != 0)
			return 0;

		pos += len;
	}
	return 1;
}

might be more efficient?

> +
> +static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
> +{
> + int retval = YAFFS_OK;
> + u8 *data = yaffs_get_temp_buffer(dev, __LINE__);
> + struct yaffs_ext_tags tags;
> + int result;
> +
> + result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);

result is unused?

> +
> + if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
> + retval = YAFFS_FAIL;
> +
> + if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
> + tags.chunk_used) {
> + yaffs_trace(YAFFS_TRACE_NANDACCESS,
> + "Chunk %d not erased", nand_chunk);
> + retval = YAFFS_FAIL;
> + }
> +
> + yaffs_release_temp_buffer(dev, data, __LINE__);
> +
> + return retval;
> +
> +}
> +
> +static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
> + int nand_chunk,
> + const u8 *data,
> + struct yaffs_ext_tags *tags)
> +{
> + int retval = YAFFS_OK;
> + struct yaffs_ext_tags temp_tags;
> + u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
> + int result;
> +
> + result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);

result is unused.

> + if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
> + temp_tags.obj_id != tags->obj_id ||
> + temp_tags.chunk_id != tags->chunk_id ||
> + temp_tags.n_bytes != tags->n_bytes)
> + retval = YAFFS_FAIL;
> +
> + yaffs_release_temp_buffer(dev, buffer, __LINE__);
> +
> + return retval;
> +}
> +
> +
> +int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
> +{
> + int reserved_chunks;
> + int reserved_blocks = dev->param.n_reserved_blocks;
> + int checkpt_blocks;
> +
> + checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
> +
> + reserved_chunks =
> + (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
> +
> + return (dev->n_free_chunks > (reserved_chunks + n_chunks));
> +}
> +
> +static int yaffs_find_alloc_block(struct yaffs_dev *dev)
> +{
> + int i;
> + struct yaffs_block_info *bi;
> +
> + if (dev->n_erased_blocks < 1) {
> + /* Hoosterman we've got a problem.
> + * Can't get space to gc
> + */
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "yaffs tragedy: no more erased blocks");
> +
> + return -1;
> + }
> +
> + /* Find an empty block. */
> +
> + for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
> + dev->alloc_block_finder++;
> + if (dev->alloc_block_finder < dev->internal_start_block
> + || dev->alloc_block_finder > dev->internal_end_block) {
> + dev->alloc_block_finder = dev->internal_start_block;
> + }
> +
> + bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
> +
> + if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
> + bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
> + dev->seq_number++;
> + bi->seq_number = dev->seq_number;
> + dev->n_erased_blocks--;
> + yaffs_trace(YAFFS_TRACE_ALLOCATE,
> + "Allocated block %d, seq %d, %d left" ,
> + dev->alloc_block_finder, dev->seq_number,
> + dev->n_erased_blocks);
> + return dev->alloc_block_finder;
> + }
> + }
> +
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "yaffs tragedy: no more erased blocks, but there should have been %d",
> + dev->n_erased_blocks);
> +
> + return -1;
> +}
> +
> +static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
> + struct yaffs_block_info **block_ptr)
> +{
> + int ret_val;
> + struct yaffs_block_info *bi;
> +
> + if (dev->alloc_block < 0) {
> + /* Get next block to allocate off */
> + dev->alloc_block = yaffs_find_alloc_block(dev);
> + dev->alloc_page = 0;
> + }
> +
> + if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
> + /* No space unless we're allowed to use the reserve. */
> + return -1;
> + }
> +
> + if (dev->n_erased_blocks < dev->param.n_reserved_blocks
> + && dev->alloc_page == 0)
> + yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
> +
> + /* Next page please.... */
> + if (dev->alloc_block >= 0) {
> + bi = yaffs_get_block_info(dev, dev->alloc_block);
> +
> + ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
> + dev->alloc_page;
> + bi->pages_in_use++;
> + yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
> +
> + dev->alloc_page++;
> +
> + dev->n_free_chunks--;
> +
> + /* If the block is full set the state to full */
> + if (dev->alloc_page >= dev->param.chunks_per_block) {
> + bi->block_state = YAFFS_BLOCK_STATE_FULL;
> + dev->alloc_block = -1;
> + }
> +
> + if (block_ptr)
> + *block_ptr = bi;
> +
> + return ret_val;
> + }
> +
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
> +
> + return -1;
> +}
> +
> +static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
> +{
> + int n;
> +
> + n = dev->n_erased_blocks * dev->param.chunks_per_block;
> +
> + if (dev->alloc_block > 0)
> + n += (dev->param.chunks_per_block - dev->alloc_page);
> +
> + return n;
> +
> +}
> +
> +/*
> + * yaffs_skip_rest_of_block() skips over the rest of the allocation block
> + * if we don't want to write to it.
> + */
> +void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
> +{
> + struct yaffs_block_info *bi;
> +
> + if (dev->alloc_block > 0) {
> + bi = yaffs_get_block_info(dev, dev->alloc_block);
> + if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
> + bi->block_state = YAFFS_BLOCK_STATE_FULL;
> + dev->alloc_block = -1;
> + }
> + }
> +}
> +
> +static int yaffs_write_new_chunk(struct yaffs_dev *dev,
> + const u8 *data,
> + struct yaffs_ext_tags *tags, int use_reserver)
> +{
> + int attempts = 0;
> + int write_ok = 0;
> + int chunk;
> +
> + yaffs2_checkpt_invalidate(dev);
> +
> + do {
> + struct yaffs_block_info *bi = 0;
> + int erased_ok = 0;
> +
> + chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
> + if (chunk < 0) {
> + /* no space */
> + break;
> + }
> +
> + /* First check this chunk is erased, if it needs
> + * checking. The checking policy (unless forced
> + * always on) is as follows:
> + *
> + * Check the first page we try to write in a block.
> + * If the check passes then we don't need to check any
> + * more. If the check fails, we check again...
> + * If the block has been erased, we don't need to check.
> + *
> + * However, if the block has been prioritised for gc,
> + * then we think there might be something odd about
> + * this block and stop using it.
> + *
> + * Rationale: We should only ever see chunks that have
> + * not been erased if there was a partially written
> + * chunk due to power loss. This checking policy should
> + * catch that case with very few checks and thus save a
> + * lot of checks that are most likely not needed.
> + *
> + * Mods to the above
> + * If an erase check fails or the write fails we skip the
> + * rest of the block.
> + */
> +
> + /* let's give it a try */
> + attempts++;
> +
> + if (dev->param.always_check_erased)
> + bi->skip_erased_check = 0;
> +
> + if (!bi->skip_erased_check) {
> + erased_ok = yaffs_check_chunk_erased(dev, chunk);
> + if (erased_ok != YAFFS_OK) {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "**>> yaffs chunk %d was not erased",
> + chunk);
> +
> + /* If not erased, delete this one,
> + * skip rest of block and
> + * try another chunk */
> + yaffs_chunk_del(dev, chunk, 1, __LINE__);
> + yaffs_skip_rest_of_block(dev);
> + continue;
> + }
> + }
> +
> + write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
> +
> + if (!bi->skip_erased_check)
> + write_ok =
> + yaffs_verify_chunk_written(dev, chunk, data, tags);
> +
> + if (write_ok != YAFFS_OK) {
> + /* Clean up aborted write, skip to next block and
> + * try another chunk */
> + yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
> + continue;
> + }
> +
> + bi->skip_erased_check = 1;
> +
> + /* Copy the data into the robustification buffer */
> + yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
> +
> + } while (write_ok != YAFFS_OK &&
> + (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
> +
> + if (!write_ok)
> + chunk = -1;
> +
> + if (attempts > 1) {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "**>> yaffs write required %d attempts",
> + attempts);

Printing the block/page number here would be useful.
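e.g. something like (untested):

	yaffs_trace(YAFFS_TRACE_ERROR,
		"**>> yaffs write required %d attempts on chunk %d (block %d)",
		attempts, chunk,
		chunk / dev->param.chunks_per_block);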

> + dev->n_retired_writes += (attempts - 1);
> + }
> +
> + return chunk;
> +}
> +
> +/*
> + * Block retiring for handling a broken block.
> + */
> +
> +static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
> +{
> + struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
> +
> + yaffs2_checkpt_invalidate(dev);
> +
> + yaffs2_clear_oldest_dirty_seq(dev, bi);
> +
> + if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
> + if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "yaffs: Failed to mark bad and erase block %d",
> + flash_block);
> + } else {
> + struct yaffs_ext_tags tags;
> + int chunk_id =
> + flash_block * dev->param.chunks_per_block;
> +
> + u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
> +
> + memset(buffer, 0xff, dev->data_bytes_per_chunk);
> + yaffs_init_tags(&tags);
> + tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
> + if (dev->param.write_chunk_tags_fn(dev, chunk_id -
> + dev->chunk_offset,
> + buffer,
> + &tags) != YAFFS_OK)
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "yaffs: Failed to write bad block marker to block %d",
> + flash_block);
> +
> + yaffs_release_temp_buffer(dev, buffer, __LINE__);
> + }
> + }
> +
> + bi->block_state = YAFFS_BLOCK_STATE_DEAD;
> + bi->gc_prioritise = 0;
> + bi->needs_retiring = 0;
> +
> + dev->n_retired_blocks++;
> +}
> +
> +/*---------------- Name handling functions ------------*/
> +
> +static u16 yaffs_calc_name_sum(const YCHAR *name)
> +{
> + u16 sum = 0;
> + u16 i = 1;
> +
> + if (!name)
> + return 0;
> +
> + while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {

Don't need parens around *name.

> +
> + /* 0x1f mask is case insensitive */
> + sum += ((*name) & 0x1f) * i;
> + i++;
> + name++;
> + }
> + return sum;
> +}
> +
> +void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
> +{
> +#ifndef CONFIG_YAFFS_NO_SHORT_NAMES

This appears not to be a selectable config option anymore. Should be
removed.

> + memset(obj->short_name, 0, sizeof(obj->short_name));
> + if (name &&
> + strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
> + YAFFS_SHORT_NAME_LENGTH)
> + strcpy(obj->short_name, name);
> + else
> + obj->short_name[0] = _Y('\0');
> +#endif
> + obj->sum = yaffs_calc_name_sum(name);
> +}
> +
> +void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
> + const struct yaffs_obj_hdr *oh)
> +{
> +#ifdef CONFIG_YAFFS_AUTO_UNICODE

Same here. Remove this ifdef.

> + YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
> + memset(tmp_name, 0, sizeof(tmp_name));
> + yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
> + YAFFS_MAX_NAME_LENGTH + 1);
> + yaffs_set_obj_name(obj, tmp_name);
> +#else
> + yaffs_set_obj_name(obj, oh->name);
> +#endif
> +}
> +
> +/*-------------------- TNODES -------------------
> +
> + * List of spare tnodes
> + * The list is hooked together using the first pointer
> + * in the tnode.
> + */
> +
> +struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
> +{
> + struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
> +
> + if (tn) {
> + memset(tn, 0, dev->tnode_size);
> + dev->n_tnodes++;
> + }
> +
> + dev->checkpoint_blocks_required = 0; /* force recalculation */
> +
> + return tn;
> +}
> +
> +/* FreeTnode frees up a tnode and puts it back on the free list */

Function name in the comment is wrong.

> +static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
> +{
> + yaffs_free_raw_tnode(dev, tn);
> + dev->n_tnodes--;
> + dev->checkpoint_blocks_required = 0; /* force recalculation */
> +}
> +
> +static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
> +{
> + yaffs_deinit_raw_tnodes_and_objs(dev);
> + dev->n_obj = 0;
> + dev->n_tnodes = 0;
> +}
> +
> +void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
> + unsigned pos, unsigned val)
> +{
> + u32 *map = (u32 *) tn;
> + u32 bit_in_map;
> + u32 bit_in_word;
> + u32 word_in_map;
> + u32 mask;
> +
> + pos &= YAFFS_TNODES_LEVEL0_MASK;
> + val >>= dev->chunk_grp_bits;
> +
> + bit_in_map = pos * dev->tnode_width;
> + word_in_map = bit_in_map / 32;
> + bit_in_word = bit_in_map & (32 - 1);
> +
> + mask = dev->tnode_mask << bit_in_word;
> +
> + map[word_in_map] &= ~mask;
> + map[word_in_map] |= (mask & (val << bit_in_word));
> +
> + if (dev->tnode_width > (32 - bit_in_word)) {
> + bit_in_word = (32 - bit_in_word);
> + word_in_map++;
> + mask =
> + dev->tnode_mask >> bit_in_word;
> + map[word_in_map] &= ~mask;
> + map[word_in_map] |= (mask & (val >> bit_in_word));
> + }
> +}
> +
> +u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
> + unsigned pos)
> +{
> + u32 *map = (u32 *) tn;
> + u32 bit_in_map;
> + u32 bit_in_word;
> + u32 word_in_map;
> + u32 val;
> +
> + pos &= YAFFS_TNODES_LEVEL0_MASK;
> +
> + bit_in_map = pos * dev->tnode_width;
> + word_in_map = bit_in_map / 32;
> + bit_in_word = bit_in_map & (32 - 1);
> +
> + val = map[word_in_map] >> bit_in_word;
> +
> + if (dev->tnode_width > (32 - bit_in_word)) {
> + bit_in_word = (32 - bit_in_word);
> + word_in_map++;
> + val |= (map[word_in_map] << bit_in_word);
> + }
> +
> + val &= dev->tnode_mask;
> + val <<= dev->chunk_grp_bits;
> +
> + return val;
> +}
> +
> +/* ------------------- End of individual tnode manipulation -----------------*/
> +
> +/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
> + * The look up tree is represented by the top tnode and the number of top_level
> + * in the tree. 0 means only the level 0 tnode is in the tree.
> + */
> +
> +/* FindLevel0Tnode finds the level 0 tnode, if one exists. */

Function name in comment is wrong.

> +struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
> + struct yaffs_file_var *file_struct,
> + u32 chunk_id)
> +{
> + struct yaffs_tnode *tn = file_struct->top;
> + u32 i;
> + int required_depth;
> + int level = file_struct->top_level;
> +
> + dev = dev;

Delete this line.

> +
> + /* Check sane level and chunk Id */
> + if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
> + return NULL;
> +
> + if (chunk_id > YAFFS_MAX_CHUNK_ID)
> + return NULL;
> +
> + /* First check we're tall enough (ie enough top_level) */
> +
> + i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
> + required_depth = 0;
> + while (i) {
> + i >>= YAFFS_TNODES_INTERNAL_BITS;
> + required_depth++;
> + }
> +
> + if (required_depth > file_struct->top_level)
> + return NULL; /* Not tall enough, so we can't find it */
> +
> + /* Traverse down to level 0 */
> + while (level > 0 && tn) {
> + tn = tn->internal[(chunk_id >>
> + (YAFFS_TNODES_LEVEL0_BITS +
> + (level - 1) *
> + YAFFS_TNODES_INTERNAL_BITS)) &
> + YAFFS_TNODES_INTERNAL_MASK];
> + level--;
> + }
> +
> + return tn;
> +}
> +
> +/* add_find_tnode_0 finds the level 0 tnode if it exists,
> + * otherwise first expands the tree.
> + * This happens in two steps:
> + * 1. If the tree isn't tall enough, then make it taller.
> + * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
> + *
> + * Used when modifying the tree.
> + *
> + * If the tn argument is NULL, then a fresh tnode will be added otherwise the
> + * specified tn will be plugged into the ttree.
> + */
> +
> +struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
> + struct yaffs_file_var *file_struct,
> + u32 chunk_id,
> + struct yaffs_tnode *passed_tn)
> +{
> + int required_depth;
> + int i;
> + int l;
> + struct yaffs_tnode *tn;
> + u32 x;
> +
> + /* Check sane level and page Id */
> + if (file_struct->top_level < 0 ||
> + file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
> + return NULL;
> +
> + if (chunk_id > YAFFS_MAX_CHUNK_ID)
> + return NULL;
> +
> + /* First check we're tall enough (ie enough top_level) */
> +
> + x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
> + required_depth = 0;
> + while (x) {
> + x >>= YAFFS_TNODES_INTERNAL_BITS;
> + required_depth++;
> + }
> +
> + if (required_depth > file_struct->top_level) {
> + /* Not tall enough, gotta make the tree taller */
> + for (i = file_struct->top_level; i < required_depth; i++) {
> +
> + tn = yaffs_get_tnode(dev);
> +
> + if (tn) {
> + tn->internal[0] = file_struct->top;
> + file_struct->top = tn;
> + file_struct->top_level++;
> + } else {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "yaffs: no more tnodes");
> + return NULL;
> + }
> + }
> + }
> +
> + /* Traverse down to level 0, adding anything we need */
> +
> + l = file_struct->top_level;
> + tn = file_struct->top;
> +
> + if (l > 0) {
> + while (l > 0 && tn) {
> + x = (chunk_id >>
> + (YAFFS_TNODES_LEVEL0_BITS +
> + (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
> + YAFFS_TNODES_INTERNAL_MASK;
> +
> + if ((l > 1) && !tn->internal[x]) {
> + /* Add missing non-level-zero tnode */
> + tn->internal[x] = yaffs_get_tnode(dev);
> + if (!tn->internal[x])
> + return NULL;
> + } else if (l == 1) {
> + /* Looking from level 1 at level 0 */
> + if (passed_tn) {
> + /* If we already have one, release it */
> + if (tn->internal[x])
> + yaffs_free_tnode(dev,
> + tn->internal[x]);
> + tn->internal[x] = passed_tn;
> +
> + } else if (!tn->internal[x]) {
> + /* Don't have one, none passed in */
> + tn->internal[x] = yaffs_get_tnode(dev);
> + if (!tn->internal[x])
> + return NULL;
> + }
> + }
> +
> + tn = tn->internal[x];
> + l--;
> + }
> + } else {
> + /* top is level 0 */
> + if (passed_tn) {
> + memcpy(tn, passed_tn,
> + (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
> + yaffs_free_tnode(dev, passed_tn);
> + }
> + }
> +
> + return tn;
> +}
> +
> +static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
> + int chunk_obj)
> +{
> + return (tags->chunk_id == chunk_obj &&
> + tags->obj_id == obj_id &&
> + !tags->is_deleted) ? 1 : 0;
> +
> +}
> +
> +static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
> + struct yaffs_ext_tags *tags, int obj_id,
> + int inode_chunk)
> +{
> + int j;
> +
> + for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
> + if (yaffs_check_chunk_bit
> + (dev, the_chunk / dev->param.chunks_per_block,
> + the_chunk % dev->param.chunks_per_block)) {
> +
> + if (dev->chunk_grp_size == 1)
> + return the_chunk;
> + else {
> + yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
> + tags);
> + if (yaffs_tags_match(tags,
> + obj_id, inode_chunk)) {
> + /* found it; */
> + return the_chunk;
> + }
> + }
> + }
> + the_chunk++;
> + }
> + return -1;
> +}
> +
> +static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
> + struct yaffs_ext_tags *tags)
> +{
> + /*Get the Tnode, then get the level 0 offset chunk offset */
> + struct yaffs_tnode *tn;
> + int the_chunk = -1;
> + struct yaffs_ext_tags local_tags;
> + int ret_val = -1;
> + struct yaffs_dev *dev = in->my_dev;
> +
> + if (!tags) {
> + /* Passed a NULL, so use our own tags space */
> + tags = &local_tags;
> + }
> +
> + tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
> +
> + if (!tn)
> + return ret_val;
> +
> + the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
> +
> + ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
> + inode_chunk);
> + return ret_val;

You don't need the intermediate ret_val here, just do the return directly.
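i.e.:

	return yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
					 inode_chunk);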

> +}
> +
> +static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
> + struct yaffs_ext_tags *tags)
> +{
> + /* Get the Tnode, then get the level 0 offset chunk offset */
> + struct yaffs_tnode *tn;
> + int the_chunk = -1;
> + struct yaffs_ext_tags local_tags;
> + struct yaffs_dev *dev = in->my_dev;
> + int ret_val = -1;
> +
> + if (!tags) {
> + /* Passed a NULL, so use our own tags space */
> + tags = &local_tags;
> + }
> +
> + tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
> +
> + if (!tn)
> + return ret_val;
> +
> + the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
> +
> + ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
> + inode_chunk);
> +
> + /* Delete the entry in the filestructure (if found) */
> + if (ret_val != -1)
> + yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
> +
> + return ret_val;
> +}
> +
> +int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
> + int nand_chunk, int in_scan)
> +{
> + /* NB in_scan is zero unless scanning.
> + * For forward scanning, in_scan is > 0;
> + * for backward scanning in_scan is < 0
> + *
> + * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
> + */
> +
> + struct yaffs_tnode *tn;
> + struct yaffs_dev *dev = in->my_dev;
> + int existing_cunk;
> + struct yaffs_ext_tags existing_tags;
> + struct yaffs_ext_tags new_tags;
> + unsigned existing_serial, new_serial;
> +
> + if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
> + /* Just ignore an attempt at putting a chunk into a non-file
> + * during scanning.
> + * If it is not during Scanning then something went wrong!
> + */
> + if (!in_scan) {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "yaffs tragedy:attempt to put data chunk into a non-file"
> + );
> + BUG();
> + }
> +
> + yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
> + return YAFFS_OK;
> + }
> +
> + tn = yaffs_add_find_tnode_0(dev,
> + &in->variant.file_variant,
> + inode_chunk, NULL);
> + if (!tn)
> + return YAFFS_FAIL;
> +
> + if (!nand_chunk)
> + /* Dummy insert, bail now */
> + return YAFFS_OK;
> +
> + existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);

s/cunk/chunk/ ? I'm too paranoid to google and see if 'cunk' is a real
word :-).

> +
> + if (in_scan != 0) {
> + /* If we're scanning then we need to test for duplicates
> + * NB This does not need to be efficient since it should only
> + * happen when the power fails during a write, then only one
> + * chunk should ever be affected.
> + *
> + * Correction for YAFFS2: This could happen quite a lot and we
> + * need to think about efficiency! TODO
> + * Update: For backward scanning we don't need to re-read tags
> + * so this is quite cheap.
> + */
> +
> + if (existing_cunk > 0) {
> + /* NB Right now existing chunk will not be real
> + * chunk_id if the chunk group size > 1
> + * thus we have to do a FindChunkInFile to get the
> + * real chunk id.
> + *
> + * We have a duplicate now we need to decide which
> + * one to use:
> + *
> + * Backwards scanning YAFFS2: The old one is what
> + * we use, dump the new one.
> + * YAFFS1: Get both sets of tags and compare serial
> + * numbers.
> + */
> +
> + if (in_scan > 0) {
> + /* Only do this for forward scanning */
> + yaffs_rd_chunk_tags_nand(dev,
> + nand_chunk,
> + NULL, &new_tags);
> +
> + /* Do a proper find */
> + existing_cunk =
> + yaffs_find_chunk_in_file(in, inode_chunk,
> + &existing_tags);
> + }
> +
> + if (existing_cunk <= 0) {
> + /*Hoosterman - how did this happen? */
> +
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "yaffs tragedy: existing chunk < 0 in scan"
> + );
> +
> + }
> +
> + /* NB The deleted flags should be false, otherwise
> + * the chunks will not be loaded during a scan
> + */
> +
> + if (in_scan > 0) {
> + new_serial = new_tags.serial_number;
> + existing_serial = existing_tags.serial_number;
> + }
> +
> + if ((in_scan > 0) &&
> + (existing_cunk <= 0 ||
> + ((existing_serial + 1) & 3) == new_serial)) {
> + /* Forward scanning.
> + * Use new
> + * Delete the old one and drop through to
> + * update the tnode
> + */
> + yaffs_chunk_del(dev, existing_cunk, 1,
> + __LINE__);
> + } else {
> + /* Backward scanning or we want to use the
> + * existing one
> + * Delete the new one and return early so that
> + * the tnode isn't changed
> + */
> + yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
> + return YAFFS_OK;
> + }
> + }
> +
> + }
> +
> + if (existing_cunk == 0)
> + in->n_data_chunks++;
> +
> + yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
> +
> + return YAFFS_OK;
> +}
> +
> +static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
> +{
> + struct yaffs_block_info *the_block;
> + unsigned block_no;
> +
> + yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
> +
> + block_no = chunk / dev->param.chunks_per_block;
> + the_block = yaffs_get_block_info(dev, block_no);
> + if (the_block) {
> + the_block->soft_del_pages++;
> + dev->n_free_chunks++;
> + yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
> + }
> +}
> +
> +/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
> + * the chunks in the file.

Old function name in comment.

> + * All soft deleting does is increment the block's softdelete count and pulls
> + * the chunk out of the tnode.
> + * Thus, essentially this is the same as DeleteWorker except that the chunks
> + * are soft deleted.
> + */
> +
> +static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
> + u32 level, int chunk_offset)
> +{
> + int i;
> + int the_chunk;
> + int all_done = 1;
> + struct yaffs_dev *dev = in->my_dev;
> +
> + if (!tn)
> + return 1;
> +
> + if (level > 0) {
> + for (i = YAFFS_NTNODES_INTERNAL - 1;
> + all_done && i >= 0;
> + i--) {
> + if (tn->internal[i]) {
> + all_done =
> + yaffs_soft_del_worker(in,
> + tn->internal[i],
> + level - 1,
> + (chunk_offset <<
> + YAFFS_TNODES_INTERNAL_BITS)
> + + i);
> + if (all_done) {
> + yaffs_free_tnode(dev,
> + tn->internal[i]);
> + tn->internal[i] = NULL;
> + } else {
> + /* Can this happen? */

Remove the empty else, or put a warning/bug in it.
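e.g.:

	} else {
		/* Can this happen? */
		WARN_ON(1);
	}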

> + }
> + }
> + }
> + return (all_done) ? 1 : 0;

Parens are unnecessary.

> + }
> +
> + /* level 0 */
> + for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {

Indentation looks mucked up here. Delete the space before the for.

> + the_chunk = yaffs_get_group_base(dev, tn, i);
> + if (the_chunk) {
> + yaffs_soft_del_chunk(dev, the_chunk);
> + yaffs_load_tnode_0(dev, tn, i, 0);
> + }
> + }
> + return 1;
> +}
> +
> +static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
> +{
> + struct yaffs_dev *dev = obj->my_dev;
> + struct yaffs_obj *parent;
> +
> + yaffs_verify_obj_in_dir(obj);
> + parent = obj->parent;
> +
> + yaffs_verify_dir(parent);
> +
> + if (dev && dev->param.remove_obj_fn)
> + dev->param.remove_obj_fn(obj);
> +
> + list_del_init(&obj->siblings);
> + obj->parent = NULL;
> +
> + yaffs_verify_dir(parent);
> +}
> +
> +void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
> +{
> + if (!directory) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "tragedy: Trying to add an object to a null pointer directory"
> + );
> + BUG();
> + return;
> + }
> + if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "tragedy: Trying to add an object to a non-directory"
> + );
> + BUG();
> + }
> +
> + if (obj->siblings.prev == NULL) {
> + /* Not initialised */
> + BUG();
> + }
> +
> + yaffs_verify_dir(directory);
> +
> + yaffs_remove_obj_from_dir(obj);
> +
> + /* Now add it */
> + list_add(&obj->siblings, &directory->variant.dir_variant.children);
> + obj->parent = directory;
> +
> + if (directory == obj->my_dev->unlinked_dir
> + || directory == obj->my_dev->del_dir) {
> + obj->unlinked = 1;
> + obj->my_dev->n_unlinked_files++;
> + obj->rename_allowed = 0;
> + }
> +
> + yaffs_verify_dir(directory);
> + yaffs_verify_obj_in_dir(obj);
> +}
> +
> +static int yaffs_change_obj_name(struct yaffs_obj *obj,
> + struct yaffs_obj *new_dir,
> + const YCHAR *new_name, int force, int shadows)
> +{
> + int unlink_op;
> + int del_op;
> + struct yaffs_obj *existing_target;
> +
> + if (new_dir == NULL)
> + new_dir = obj->parent; /* use the old directory */
> +
> + if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "tragedy: yaffs_change_obj_name: new_dir is not a directory"
> + );
> + BUG();
> + }
> +
> + unlink_op = (new_dir == obj->my_dev->unlinked_dir);
> + del_op = (new_dir == obj->my_dev->del_dir);
> +
> + existing_target = yaffs_find_by_name(new_dir, new_name);
> +
> + /* If the object is a file going into the unlinked directory,
> + * then it is OK to just stuff it in since duplicate names are OK.
> + * else only proceed if the new name does not exist and we're putting
> + * it into a directory.
> + */
> + if (!(unlink_op || del_op || force ||
> + shadows > 0 || !existing_target) ||
> + new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
> + return YAFFS_FAIL;
> +
> + yaffs_set_obj_name(obj, new_name);
> + obj->dirty = 1;
> + yaffs_add_obj_to_dir(new_dir, obj);
> +
> + if (unlink_op)
> + obj->unlinked = 1;
> +
> + /* If it is a deletion then we mark it as a shrink for gc */
> + if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
> + return YAFFS_OK;
> +
> + return YAFFS_FAIL;
> +}
> +
> +/*------------------------ Short Operations Cache ------------------------------
> + * In many situations where there is no high level buffering a lot of
> + * reads might be short sequential reads, and a lot of writes may be short
> + * sequential writes. eg. scanning/writing a jpeg file.
> + * In these cases, a short read/write cache can provide a huge perfomance
> + * benefit with dumb-as-a-rock code.
> + * In Linux, the page cache provides read buffering and the short op cache
> + * provides write buffering.
> + *
> + * There are a small number (~10) of cache chunks per device so that we don't
> + * need a very intelligent search.
> + */
> +
> +static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
> +{
> + struct yaffs_dev *dev = obj->my_dev;
> + int i;
> + struct yaffs_cache *cache;
> + int n_caches = obj->my_dev->param.n_caches;
> +
> + for (i = 0; i < n_caches; i++) {
> + cache = &dev->cache[i];
> + if (cache->object == obj && cache->dirty)
> + return 1;
> + }
> +
> + return 0;
> +}
> +
> +static void yaffs_flush_file_cache(struct yaffs_obj *obj)
> +{
> + struct yaffs_dev *dev = obj->my_dev;
> + int lowest = -99; /* Stop compiler whining. */
> + int i;
> + struct yaffs_cache *cache;
> + int chunk_written = 0;
> + int n_caches = obj->my_dev->param.n_caches;
> +
> + if (n_caches < 1)
> + return;
> + do {
> + cache = NULL;
> +
> + /* Find the lowest dirty chunk for this object */
> + for (i = 0; i < n_caches; i++) {
> + if (dev->cache[i].object == obj &&
> + dev->cache[i].dirty) {
> + if (!cache ||
> + dev->cache[i].chunk_id < lowest) {
> + cache = &dev->cache[i];
> + lowest = cache->chunk_id;
> + }
> + }
> + }
> +
> + if (cache && !cache->locked) {
> + /* Write it out and free it up */
> + chunk_written =
> + yaffs_wr_data_obj(cache->object,
> + cache->chunk_id,
> + cache->data,
> + cache->n_bytes, 1);
> + cache->dirty = 0;
> + cache->object = NULL;
> + }
> + } while (cache && chunk_written > 0);
> +
> + if (cache)
> + /* Hoosterman, disk full while writing cache out. */
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "yaffs tragedy: no space during cache write");
> +}
> +
> +/*yaffs_flush_whole_cache(dev)
> + *
> + *
> + */
> +
> +void yaffs_flush_whole_cache(struct yaffs_dev *dev)
> +{
> + struct yaffs_obj *obj;
> + int n_caches = dev->param.n_caches;
> + int i;
> +
> + /* Find a dirty object in the cache and flush it...
> + * until there are no further dirty objects.
> + */
> + do {
> + obj = NULL;
> + for (i = 0; i < n_caches && !obj; i++) {
> + if (dev->cache[i].object && dev->cache[i].dirty)
> + obj = dev->cache[i].object;

Can we break out of the loop here?
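i.e. something like:

	for (i = 0; i < n_caches; i++) {
		if (dev->cache[i].object && dev->cache[i].dirty) {
			obj = dev->cache[i].object;
			break;
		}
	}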

> + }
> + if (obj)
> + yaffs_flush_file_cache(obj);
> + } while (obj);
> +
> +}
> +
> +/* Grab us a cache chunk for use.
> + * First look for an empty one.
> + * Then look for the least recently used non-dirty one.
> + * Then look for the least recently used dirty one...., flush and look again.
> + */
> +static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
> +{
> + int i;
> +
> + if (dev->param.n_caches > 0) {
> + for (i = 0; i < dev->param.n_caches; i++) {
> + if (!dev->cache[i].object)
> + return &dev->cache[i];
> + }
> + }
> + return NULL;
> +}
> +
> +static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
> +{
> + struct yaffs_cache *cache;
> + struct yaffs_obj *the_obj;
> + int usage;
> + int i;
> + int pushout;
> +
> + if (dev->param.n_caches < 1)
> + return NULL;
> +
> + /* Try find a non-dirty one... */
> +
> + cache = yaffs_grab_chunk_worker(dev);
> +
> + if (!cache) {
> + /* They were all dirty, find the LRU object and flush
> + * its cache, then find again.
> + * NB what's here is not very accurate,
> + * we actually flush the object with the LRU chunk.
> + */
> +
> + /* With locking we can't assume we can use entry zero,
> + * Set the_obj to a valid pointer for Coverity. */
> + the_obj = dev->cache[0].object;
> + usage = -1;
> + cache = NULL;
> + pushout = -1;
> +
> + for (i = 0; i < dev->param.n_caches; i++) {
> + if (dev->cache[i].object &&
> + !dev->cache[i].locked &&
> + (dev->cache[i].last_use < usage ||
> + !cache)) {
> + usage = dev->cache[i].last_use;
> + the_obj = dev->cache[i].object;
> + cache = &dev->cache[i];
> + pushout = i;
> + }
> + }
> +
> + if (!cache || cache->dirty) {
> + /* Flush and try again */
> + yaffs_flush_file_cache(the_obj);
> + cache = yaffs_grab_chunk_worker(dev);
> + }
> + }
> + return cache;
> +}
> +
> +/* Find a cached chunk */
> +static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
> + int chunk_id)
> +{
> + struct yaffs_dev *dev = obj->my_dev;
> + int i;
> +
> + if (dev->param.n_caches < 1)
> + return NULL;
> +
> + for (i = 0; i < dev->param.n_caches; i++) {
> + if (dev->cache[i].object == obj &&
> + dev->cache[i].chunk_id == chunk_id) {
> + dev->cache_hits++;
> +
> + return &dev->cache[i];
> + }
> + }
> + return NULL;
> +}
> +
> +/* Mark the chunk for the least recently used algorithym */

Typo "algorithym".

> +static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
> + int is_write)
> +{
> + int i;
> +
> + if (dev->param.n_caches < 1)
> + return;
> +
> + if (dev->cache_last_use < 0 ||
> + dev->cache_last_use > 100000000) {

Should possibly be a define to make it more clear what the big magic
number means.
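e.g. (the name is just a suggestion):

#define YAFFS_CACHE_LAST_USE_LIMIT	100000000

	...

	if (dev->cache_last_use < 0 ||
	    dev->cache_last_use > YAFFS_CACHE_LAST_USE_LIMIT) {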

> + /* Reset the cache usages */
> + for (i = 1; i < dev->param.n_caches; i++)
> + dev->cache[i].last_use = 0;
> +
> + dev->cache_last_use = 0;
> + }
> + dev->cache_last_use++;
> + cache->last_use = dev->cache_last_use;
> +
> + if (is_write)
> + cache->dirty = 1;
> +}
> +
> +/* Invalidate a single cache page.
> + * Do this when a whole page gets written,
> + * ie the short cache for this page is no longer valid.
> + */
> +static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
> +{
> + struct yaffs_cache *cache;
> +
> + if (object->my_dev->param.n_caches > 0) {
> + cache = yaffs_find_chunk_cache(object, chunk_id);
> +
> + if (cache)
> + cache->object = NULL;
> + }
> +}
> +
> +/* Invalidate all the cache pages associated with this object
> + * Do this whenever ther file is deleted or resized.
> + */
> +static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
> +{
> + int i;
> + struct yaffs_dev *dev = in->my_dev;
> +
> + if (dev->param.n_caches > 0) {
> + /* Invalidate it. */
> + for (i = 0; i < dev->param.n_caches; i++) {
> + if (dev->cache[i].object == in)
> + dev->cache[i].object = NULL;
> + }
> + }
> +}
> +
> +static void yaffs_unhash_obj(struct yaffs_obj *obj)
> +{
> + int bucket;
> + struct yaffs_dev *dev = obj->my_dev;
> +
> + /* If it is still linked into the bucket list, free from the list */
> + if (!list_empty(&obj->hash_link)) {
> + list_del_init(&obj->hash_link);
> + bucket = yaffs_hash_fn(obj->obj_id);
> + dev->obj_bucket[bucket].count--;
> + }
> +}
> +
> +/* FreeObject frees up a Object and puts it back on the free list */

Old function name in comment.

> +static void yaffs_free_obj(struct yaffs_obj *obj)
> +{
> + struct yaffs_dev *dev;
> +
> + if (!obj) {
> + BUG();
> + return;
> + }
> + dev = obj->my_dev;
> + yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
> + obj, obj->my_inode);
> + if (obj->parent)
> + BUG();
> + if (!list_empty(&obj->siblings))
> + BUG();
> +
> + if (obj->my_inode) {
> + /* We're still hooked up to a cached inode.
> + * Don't delete now, but mark for later deletion
> + */
> + obj->defered_free = 1;
> + return;
> + }
> +
> + yaffs_unhash_obj(obj);
> +
> + yaffs_free_raw_obj(dev, obj);
> + dev->n_obj--;
> + dev->checkpoint_blocks_required = 0; /* force recalculation */
> +}
> +
> +void yaffs_handle_defered_free(struct yaffs_obj *obj)
> +{
> + if (obj->defered_free)
> + yaffs_free_obj(obj);
> +}
> +
> +static int yaffs_generic_obj_del(struct yaffs_obj *in)
> +{
> + /* Iinvalidate the file's data in the cache, without flushing. */

Typo "Iinvalidate".

> + yaffs_invalidate_whole_cache(in);
> +
> + if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
> + /* Move to unlinked directory so we have a deletion record */
> + yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
> + 0);
> + }
> +
> + yaffs_remove_obj_from_dir(in);
> + yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
> + in->hdr_chunk = 0;
> +
> + yaffs_free_obj(in);
> + return YAFFS_OK;
> +
> +}
> +
> +static void yaffs_soft_del_file(struct yaffs_obj *obj)
> +{
> + if (!obj->deleted ||
> + obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
> + obj->soft_del)
> + return;
> +
> + if (obj->n_data_chunks <= 0) {
> + /* Empty file with no duplicate object headers,
> + * just delete it immediately */
> + yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
> + obj->variant.file_variant.top = NULL;
> + yaffs_trace(YAFFS_TRACE_TRACING,
> + "yaffs: Deleting empty file %d",
> + obj->obj_id);
> + yaffs_generic_obj_del(obj);
> + } else {
> + yaffs_soft_del_worker(obj,
> + obj->variant.file_variant.top,
> + obj->variant.
> + file_variant.top_level, 0);
> + obj->soft_del = 1;
> + }
> +}
> +
> +/* Pruning removes any part of the file structure tree that is beyond the
> + * bounds of the file (ie that does not point to chunks).
> + *
> + * A file should only get pruned when its size is reduced.
> + *
> + * Before pruning, the chunks must be pulled from the tree and the
> + * level 0 tnode entries must be zeroed out.
> + * Could also use this for file deletion, but that's probably better handled
> + * by a special case.
> + *
> + * This function is recursive. For levels > 0 the function is called again on
> + * any sub-tree. For level == 0 we just check if the sub-tree has data.
> + * If there is no data in a subtree then it is pruned.
> + */
> +
> +static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
> + struct yaffs_tnode *tn, u32 level,
> + int del0)
> +{
> + int i;
> + int has_data;
> +
> + if (!tn)
> + return tn;
> +
> + has_data = 0;
> +
> + if (level > 0) {
> + for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
> + if (tn->internal[i]) {
> + tn->internal[i] =
> + yaffs_prune_worker(dev,
> + tn->internal[i],
> + level - 1,
> + (i == 0) ? del0 : 1);
> + }
> +
> + if (tn->internal[i])
> + has_data++;
> + }
> + } else {
> + int tnode_size_u32 = dev->tnode_size / sizeof(u32);
> + u32 *map = (u32 *) tn;
> +
> + for (i = 0; !has_data && i < tnode_size_u32; i++) {
> + if (map[i])
> + has_data++;
> + }
> + }
> +
> + if (has_data == 0 && del0) {
> + /* Free and return NULL */
> + yaffs_free_tnode(dev, tn);
> + tn = NULL;
> + }
> + return tn;
> +}
> +
> +static int yaffs_prune_tree(struct yaffs_dev *dev,
> + struct yaffs_file_var *file_struct)
> +{
> + int i;
> + int has_data;
> + int done = 0;
> + struct yaffs_tnode *tn;
> +
> + if (file_struct->top_level < 1)
> + return YAFFS_OK;
> +
> + file_struct->top =
> + yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
> +
> + /* Now we have a tree with all the non-zero branches NULL but
> + * the height is the same as it was.
> + * Let's see if we can trim internal tnodes to shorten the tree.
> + * We can do this if only the 0th element in the tnode is in use
> + * (ie all the non-zero are NULL)
> + */
> +
> + while (file_struct->top_level && !done) {
> + tn = file_struct->top;
> +
> + has_data = 0;
> + for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
> + if (tn->internal[i])
> + has_data++;
> + }
> +
> + if (!has_data) {
> + file_struct->top = tn->internal[0];
> + file_struct->top_level--;
> + yaffs_free_tnode(dev, tn);
> + } else {
> + done = 1;
> + }
> + }
> +
> + return YAFFS_OK;
> +}
> +
> +/*-------------------- End of File Structure functions.-------------------*/
> +
> +/* alloc_empty_obj gets us a clean Object.*/
> +static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
> +{
> + struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
> +
> + if (!obj)
> + return obj;
> +
> + dev->n_obj++;
> +
> + /* Now sweeten it up... */
> +
> + memset(obj, 0, sizeof(struct yaffs_obj));
> + obj->being_created = 1;
> +
> + obj->my_dev = dev;
> + obj->hdr_chunk = 0;
> + obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
> + INIT_LIST_HEAD(&(obj->hard_links));
> + INIT_LIST_HEAD(&(obj->hash_link));
> + INIT_LIST_HEAD(&obj->siblings);
> +
> + /* Now make the directory sane */
> + if (dev->root_dir) {
> + obj->parent = dev->root_dir;
> + list_add(&(obj->siblings),
> + &dev->root_dir->variant.dir_variant.children);
> + }
> +
> + /* Add it to the lost and found directory.
> + * NB Can't put root or lost-n-found in lost-n-found so
> + * check if lost-n-found exists first
> + */
> + if (dev->lost_n_found)
> + yaffs_add_obj_to_dir(dev->lost_n_found, obj);
> +
> + obj->being_created = 0;
> +
> + dev->checkpoint_blocks_required = 0; /* force recalculation */
> +
> + return obj;
> +}
> +
> +static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
> +{
> + int i;
> + int l = 999;
> + int lowest = 999999;

l/lowest = INT_MAX?

> +
> + /* Search for the shortest list or one that
> + * isn't too long.
> + */
> +
> + for (i = 0; i < 10 && lowest > 4; i++) {
> + dev->bucket_finder++;
> + dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
> + if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
> + lowest = dev->obj_bucket[dev->bucket_finder].count;
> + l = dev->bucket_finder;
> + }
> + }
> +
> + return l;
> +}
> +
> +static int yaffs_new_obj_id(struct yaffs_dev *dev)
> +{
> + int bucket = yaffs_find_nice_bucket(dev);
> + int found = 0;
> + struct list_head *i;
> + u32 n = (u32) bucket;
> +
> + /* Now find an object value that has not already been taken
> + * by scanning the list.
> + */
> +
> + while (!found) {
> + found = 1;
> + n += YAFFS_NOBJECT_BUCKETS;
> + if (1 || dev->obj_bucket[bucket].count > 0) {

Err, this if statement always succeeds. Is this left over testing code?
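Presumably it should just be:

	if (dev->obj_bucket[bucket].count > 0) {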

> + list_for_each(i, &dev->obj_bucket[bucket].list) {
> + /* If there is already one in the list */
> + if (i && list_entry(i, struct yaffs_obj,
> + hash_link)->obj_id == n) {
> + found = 0;
> + }
> + }
> + }
> + }
> + return n;
> +}
> +
> +static void yaffs_hash_obj(struct yaffs_obj *in)
> +{
> + int bucket = yaffs_hash_fn(in->obj_id);
> + struct yaffs_dev *dev = in->my_dev;
> +
> + list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
> + dev->obj_bucket[bucket].count++;
> +}
> +
> +struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
> +{
> + int bucket = yaffs_hash_fn(number);
> + struct list_head *i;
> + struct yaffs_obj *in;
> +
> + list_for_each(i, &dev->obj_bucket[bucket].list) {
> + /* Look if it is in the list */
> + in = list_entry(i, struct yaffs_obj, hash_link);
> + if (in->obj_id == number) {
> + /* Don't show if it is defered free */
> + if (in->defered_free)
> + return NULL;
> + return in;
> + }
> + }
> +
> + return NULL;
> +}
> +
> +struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
> + enum yaffs_obj_type type)
> +{
> + struct yaffs_obj *the_obj = NULL;
> + struct yaffs_tnode *tn = NULL;
> +
> + if (number < 0)
> + number = yaffs_new_obj_id(dev);
> +
> + if (type == YAFFS_OBJECT_TYPE_FILE) {
> + tn = yaffs_get_tnode(dev);
> + if (!tn)
> + return NULL;
> + }
> +
> + the_obj = yaffs_alloc_empty_obj(dev);
> + if (!the_obj) {
> + if (tn)
> + yaffs_free_tnode(dev, tn);
> + return NULL;
> + }
> +
> + the_obj->fake = 0;
> + the_obj->rename_allowed = 1;
> + the_obj->unlink_allowed = 1;
> + the_obj->obj_id = number;
> + yaffs_hash_obj(the_obj);
> + the_obj->variant_type = type;
> + yaffs_load_current_time(the_obj, 1, 1);
> +
> + switch (type) {
> + case YAFFS_OBJECT_TYPE_FILE:
> + the_obj->variant.file_variant.file_size = 0;
> + the_obj->variant.file_variant.scanned_size = 0;
> + the_obj->variant.file_variant.shrink_size = ~0; /* max */
> + the_obj->variant.file_variant.top_level = 0;
> + the_obj->variant.file_variant.top = tn;
> + break;
> + case YAFFS_OBJECT_TYPE_DIRECTORY:
> + INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
> + INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
> + break;
> + case YAFFS_OBJECT_TYPE_SYMLINK:
> + case YAFFS_OBJECT_TYPE_HARDLINK:
> + case YAFFS_OBJECT_TYPE_SPECIAL:
> + /* No action required */
> + break;
> + case YAFFS_OBJECT_TYPE_UNKNOWN:
> + /* todo this should not happen */
> + break;
> + }
> + return the_obj;
> +}
> +
> +static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
> + int number, u32 mode)
> +{
> +
> + struct yaffs_obj *obj =
> + yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
> +
> + if (!obj)
> + return NULL;
> +
> + obj->fake = 1; /* it is fake so it might not use NAND */
> + obj->rename_allowed = 0;
> + obj->unlink_allowed = 0;
> + obj->deleted = 0;
> + obj->unlinked = 0;
> + obj->yst_mode = mode;
> + obj->my_dev = dev;
> + obj->hdr_chunk = 0; /* Not a valid chunk. */
> + return obj;
> +
> +}
> +
> +
> +static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
> +{
> + int i;
> +
> + dev->n_obj = 0;
> + dev->n_tnodes = 0;
> + yaffs_init_raw_tnodes_and_objs(dev);
> +
> + for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
> + INIT_LIST_HEAD(&dev->obj_bucket[i].list);
> + dev->obj_bucket[i].count = 0;
> + }
> +}
> +
> +struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
> + int number,
> + enum yaffs_obj_type type)
> +{
> + struct yaffs_obj *the_obj = NULL;
> +
> + if (number > 0)
> + the_obj = yaffs_find_by_number(dev, number);
> +
> + if (!the_obj)
> + the_obj = yaffs_new_obj(dev, number, type);
> +
> + return the_obj;
> +
> +}
> +
> +YCHAR *yaffs_clone_str(const YCHAR *str)

kstrdup?
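Or kstrndup() to keep the length cap (assuming YCHAR is plain char in the
kernel build), something like:

	YCHAR *yaffs_clone_str(const YCHAR *str)
	{
		return kstrndup(str ? str : _Y(""),
				YAFFS_MAX_ALIAS_LENGTH, GFP_NOFS);
	}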

> +{
> + YCHAR *new_str = NULL;
> + int len;
> +
> + if (!str)
> + str = _Y("");
> +
> + len = strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
> + new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
> + if (new_str) {
> + strncpy(new_str, str, len);
> + new_str[len] = 0;
> + }
> + return new_str;
> +
> +}
> +/*
> + *yaffs_update_parent() handles fixing a directories mtime and ctime when a new
> + * link (ie. name) is created or deleted in the directory.
> + *
> + * ie.
> + * create dir/a : update dir's mtime/ctime
> + * rm dir/a: update dir's mtime/ctime
> + * modify dir/a: don't update dir's mtimme/ctime
> + *
> + * This can be handled immediately or defered. Defering helps reduce the number
> + * of updates when many files in a directory are changed within a brief period.
> + *
> + * If the directory updating is defered then yaffs_update_dirty_dirs must be
> + * called periodically.
> + */
> +
> +static void yaffs_update_parent(struct yaffs_obj *obj)
> +{
> + struct yaffs_dev *dev;
> +
> + if (!obj)
> + return;
> + dev = obj->my_dev;
> + obj->dirty = 1;
> + yaffs_load_current_time(obj, 0, 1);
> + if (dev->param.defered_dir_update) {
> + struct list_head *link = &obj->variant.dir_variant.dirty;
> +
> + if (list_empty(link)) {
> + list_add(link, &dev->dirty_dirs);
> + yaffs_trace(YAFFS_TRACE_BACKGROUND,
> + "Added object %d to dirty directories",
> + obj->obj_id);
> + }
> +
> + } else {
> + yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
> + }
> +}
> +
> +void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
> +{
> + struct list_head *link;
> + struct yaffs_obj *obj;
> + struct yaffs_dir_var *d_s;
> + union yaffs_obj_var *o_v;
> +
> + yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
> +
> + while (!list_empty(&dev->dirty_dirs)) {
> + link = dev->dirty_dirs.next;
> + list_del_init(link);
> +
> + d_s = list_entry(link, struct yaffs_dir_var, dirty);
> + o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
> + obj = list_entry(o_v, struct yaffs_obj, variant);
> +
> + yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
> + obj->obj_id);
> +
> + if (obj->dirty)
> + yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
> + }
> +}
> +
> +/*
> + * Mknod (create) a new object.
> + * equiv_obj only has meaning for a hard link;
> + * alias_str only has meaning for a symlink.
> + * rdev only has meaning for devices (a subset of special objects)
> + */
> +
> +static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
> + struct yaffs_obj *parent,
> + const YCHAR *name,
> + u32 mode,
> + u32 uid,
> + u32 gid,
> + struct yaffs_obj *equiv_obj,
> + const YCHAR *alias_str, u32 rdev)
> +{
> + struct yaffs_obj *in;
> + YCHAR *str = NULL;
> + struct yaffs_dev *dev = parent->my_dev;
> +
> + /* Check if the entry exists.
> + * If it does then fail the call since we don't want a dup. */
> + if (yaffs_find_by_name(parent, name))
> + return NULL;
> +
> + if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
> + str = yaffs_clone_str(alias_str);
> + if (!str)
> + return NULL;
> + }
> +
> + in = yaffs_new_obj(dev, -1, type);
> +
> + if (!in) {
> + kfree(str);
> + return NULL;
> + }
> +
> + in->hdr_chunk = 0;
> + in->valid = 1;
> + in->variant_type = type;
> +
> + in->yst_mode = mode;
> +
> + yaffs_attribs_init(in, gid, uid, rdev);
> +
> + in->n_data_chunks = 0;
> +
> + yaffs_set_obj_name(in, name);
> + in->dirty = 1;
> +
> + yaffs_add_obj_to_dir(parent, in);
> +
> + in->my_dev = parent->my_dev;
> +
> + switch (type) {
> + case YAFFS_OBJECT_TYPE_SYMLINK:
> + in->variant.symlink_variant.alias = str;
> + break;
> + case YAFFS_OBJECT_TYPE_HARDLINK:
> + in->variant.hardlink_variant.equiv_obj = equiv_obj;
> + in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
> + list_add(&in->hard_links, &equiv_obj->hard_links);
> + break;
> + case YAFFS_OBJECT_TYPE_FILE:
> + case YAFFS_OBJECT_TYPE_DIRECTORY:
> + case YAFFS_OBJECT_TYPE_SPECIAL:
> + case YAFFS_OBJECT_TYPE_UNKNOWN:
> + /* do nothing */
> + break;
> + }
> +
> + if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
> + /* Could not create the object header, fail */
> + yaffs_del_obj(in);
> + in = NULL;
> + }
> +
> + if (in)
> + yaffs_update_parent(parent);
> +
> + return in;
> +}
> +
> +struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
> + const YCHAR *name, u32 mode, u32 uid,
> + u32 gid)
> +{
> + return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
> + uid, gid, NULL, NULL, 0);
> +}
> +
> +struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
> + u32 mode, u32 uid, u32 gid)
> +{
> + return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
> + mode, uid, gid, NULL, NULL, 0);
> +}
> +
> +struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
> + const YCHAR *name, u32 mode, u32 uid,
> + u32 gid, u32 rdev)
> +{
> + return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
> + uid, gid, NULL, NULL, rdev);
> +}
> +
> +struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
> + const YCHAR *name, u32 mode, u32 uid,
> + u32 gid, const YCHAR *alias)
> +{
> + return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
> + uid, gid, NULL, alias, 0);
> +}
> +
> +/* yaffs_link_obj returns the object id of the equivalent object.*/
> +struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
> + struct yaffs_obj *equiv_obj)
> +{
> + /* Get the real object in case we were fed a hard link obj */
> + equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
> +
> + if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
> + parent, name, 0, 0, 0,
> + equiv_obj, NULL, 0))
> + return equiv_obj;
> +
> + return NULL;
> +
> +}
> +
> +
> +
> +/*---------------------- Block Management and Page Allocation -------------*/
> +
> +static void yaffs_deinit_blocks(struct yaffs_dev *dev)
> +{
> + if (dev->block_info_alt && dev->block_info)
> + vfree(dev->block_info);
> + else
> + kfree(dev->block_info);
> +
> + dev->block_info_alt = 0;
> +
> + dev->block_info = NULL;
> +
> + if (dev->chunk_bits_alt && dev->chunk_bits)
> + vfree(dev->chunk_bits);
> + else
> + kfree(dev->chunk_bits);
> + dev->chunk_bits_alt = 0;
> + dev->chunk_bits = NULL;
> +}
> +
> +static int yaffs_init_blocks(struct yaffs_dev *dev)
> +{
> + int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
> +
> + dev->block_info = NULL;
> + dev->chunk_bits = NULL;
> + dev->alloc_block = -1; /* force it to get a new one */
> +
> + /* If the first allocation strategy fails, try the alternate one */
> + dev->block_info =
> + kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
> + if (!dev->block_info) {
> + dev->block_info =
> + vmalloc(n_blocks * sizeof(struct yaffs_block_info));
> + dev->block_info_alt = 1;
> + } else {
> + dev->block_info_alt = 0;
> + }

I still think this should just use one allocation scheme (probably
vmalloc) rather than attempting both; that would make the code a bit
simpler.
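
i.e. something roughly like this (sketch only, same names as the patch):

	dev->block_info = vmalloc(n_blocks * sizeof(struct yaffs_block_info));
	if (!dev->block_info)
		goto alloc_error;

	dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
	dev->chunk_bits = vmalloc(dev->chunk_bit_stride * n_blocks);
	if (!dev->chunk_bits)
		goto alloc_error;

with yaffs_deinit_blocks() then unconditionally calling vfree() and the
block_info_alt/chunk_bits_alt flags going away entirely.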

> +
> + if (!dev->block_info)
> + goto alloc_error;
> +
> + /* Set up dynamic blockinfo stuff. Round up bytes. */
> + dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
> + dev->chunk_bits =
> + kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
> + if (!dev->chunk_bits) {
> + dev->chunk_bits =
> + vmalloc(dev->chunk_bit_stride * n_blocks);
> + dev->chunk_bits_alt = 1;
> + } else {
> + dev->chunk_bits_alt = 0;
> + }
> + if (!dev->chunk_bits)
> + goto alloc_error;
> +
> +
> + memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
> + memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
> + return YAFFS_OK;
> +
> +alloc_error:
> + yaffs_deinit_blocks(dev);
> + return YAFFS_FAIL;
> +}
> +
> +
> +void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
> +{
> + struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
> + int erased_ok = 0;
> + int i;
> +
> + /* If the block is still healthy erase it and mark as clean.
> + * If the block has had a data failure, then retire it.
> + */
> +
> + yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
> + "yaffs_block_became_dirty block %d state %d %s",
> + block_no, bi->block_state,
> + (bi->needs_retiring) ? "needs retiring" : "");
> +
> + yaffs2_clear_oldest_dirty_seq(dev, bi);
> +
> + bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
> +
> + /* If this is the block being garbage collected then stop gc'ing */
> + if (block_no == dev->gc_block)
> + dev->gc_block = 0;
> +
> + /* If this block is currently the best candidate for gc
> + * then drop as a candidate */
> + if (block_no == dev->gc_dirtiest) {
> + dev->gc_dirtiest = 0;
> + dev->gc_pages_in_use = 0;
> + }
> +
> + if (!bi->needs_retiring) {
> + yaffs2_checkpt_invalidate(dev);
> + erased_ok = yaffs_erase_block(dev, block_no);
> + if (!erased_ok) {
> + dev->n_erase_failures++;
> + yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
> + "**>> Erasure failed %d", block_no);
> + }
> + }
> +
> + /* Verify erasure if needed */
> + if (erased_ok &&
> + ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
> + !yaffs_skip_verification(dev))) {
> + for (i = 0; i < dev->param.chunks_per_block; i++) {
> + if (!yaffs_check_chunk_erased(dev,
> + block_no * dev->param.chunks_per_block + i)) {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + ">>Block %d erasure supposedly OK, but chunk %d not erased",
> + block_no, i);
> + }
> + }
> + }
> +
> + if (!erased_ok) {
> + /* We lost a block of free space */
> + dev->n_free_chunks -= dev->param.chunks_per_block;
> + yaffs_retire_block(dev, block_no);
> + yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
> + "**>> Block %d retired", block_no);
> + return;
> + }
> +
> + /* Clean it up... */
> + bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
> + bi->seq_number = 0;
> + dev->n_erased_blocks++;
> + bi->pages_in_use = 0;
> + bi->soft_del_pages = 0;
> + bi->has_shrink_hdr = 0;
> + bi->skip_erased_check = 1; /* Clean, so no need to check */
> + bi->gc_prioritise = 0;
> + yaffs_clear_chunk_bits(dev, block_no);
> +
> + yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
> +}
> +
> +static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
> + struct yaffs_block_info *bi,
> + int old_chunk, u8 *buffer)
> +{
> + int new_chunk;
> + int mark_flash = 1;
> + struct yaffs_ext_tags tags;
> + struct yaffs_obj *object;
> + int matching_chunk;
> + int ret_val = YAFFS_OK;
> +
> + yaffs_init_tags(&tags);
> + yaffs_rd_chunk_tags_nand(dev, old_chunk,
> + buffer, &tags);
> + object = yaffs_find_by_number(dev, tags.obj_id);
> +
> + yaffs_trace(YAFFS_TRACE_GC_DETAIL,
> + "Collecting chunk in block %d, %d %d %d ",
> + dev->gc_chunk, tags.obj_id,
> + tags.chunk_id, tags.n_bytes);
> +
> + if (object && !yaffs_skip_verification(dev)) {
> + if (tags.chunk_id == 0)
> + matching_chunk =
> + object->hdr_chunk;
> + else if (object->soft_del)
> + /* Defeat the test */
> + matching_chunk = old_chunk;
> + else
> + matching_chunk =
> + yaffs_find_chunk_in_file
> + (object, tags.chunk_id,
> + NULL);
> +
> + if (old_chunk != matching_chunk)
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "gc: page in gc mismatch: %d %d %d %d",
> + old_chunk,
> + matching_chunk,
> + tags.obj_id,
> + tags.chunk_id);
> + }
> +
> + if (!object) {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "page %d in gc has no object: %d %d %d ",
> + old_chunk,
> + tags.obj_id, tags.chunk_id,
> + tags.n_bytes);
> + }
> +
> + if (object &&
> + object->deleted &&
> + object->soft_del && tags.chunk_id != 0) {
> + /* Data chunk in a soft deleted file,
> + * throw it away.
> + * It's a soft deleted data chunk,
> + * No need to copy this, just forget
> + * about it and fix up the object.
> + */
> +
> + /* Free chunks already includes
> + * soft-deleted chunks; however, this
> + * chunk is soon going to be really
> + * deleted, which will increment free
> + * chunks. We have to decrement free
> + * chunks so this works out properly.
> + */
> + dev->n_free_chunks--;
> + bi->soft_del_pages--;
> +
> + object->n_data_chunks--;
> + if (object->n_data_chunks <= 0) {
> + /* remember to clean up the obj */
> + dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
> + dev->n_clean_ups++;
> + }
> + mark_flash = 0;
> + } else if (object) {
> + /* It's either a data chunk in a live
> + * file or an ObjectHeader, so we're
> + * interested in it.
> + * NB Need to keep the ObjectHeaders of
> + * deleted files until the whole file
> + * has been deleted off
> + */
> + tags.serial_number++;
> + dev->n_gc_copies++;
> +
> + if (tags.chunk_id == 0) {
> + /* It is an object Id,
> + * We need to nuke the
> + * shrinkheader flags since its
> + * work is done.
> + * Also need to clean up
> + * shadowing.
> + */
> + struct yaffs_obj_hdr *oh;
> + oh = (struct yaffs_obj_hdr *) buffer;
> +
> + oh->is_shrink = 0;
> + tags.extra_is_shrink = 0;
> + oh->shadows_obj = 0;
> + oh->inband_shadowed_obj_id = 0;
> + tags.extra_shadows = 0;
> +
> + /* Update file size */
> + if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
> + oh->file_size =
> + object->variant.file_variant.file_size;
> + tags.extra_length = oh->file_size;
> + }
> +
> + yaffs_verify_oh(object, oh, &tags, 1);
> + new_chunk =
> + yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
> + } else {
> + new_chunk =
> + yaffs_write_new_chunk(dev, buffer, &tags, 1);
> + }
> +
> + if (new_chunk < 0) {
> + ret_val = YAFFS_FAIL;
> + } else {
> +
> + /* Now fix up the Tnodes etc. */
> +
> + if (tags.chunk_id == 0) {
> + /* It's a header */
> + object->hdr_chunk = new_chunk;
> + object->serial = tags.serial_number;
> + } else {
> + /* It's a data chunk */
> + yaffs_put_chunk_in_file(object, tags.chunk_id,
> + new_chunk, 0);
> + }
> + }
> + }
> + if (ret_val == YAFFS_OK)
> + yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
> + return ret_val;
> +}
> +
> +static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
> +{
> + int old_chunk;
> + int ret_val = YAFFS_OK;
> + int i;
> + int is_checkpt_block;
> + int max_copies;
> + int chunks_before = yaffs_get_erased_chunks(dev);
> + int chunks_after;
> + struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
> +
> + is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
> +
> + yaffs_trace(YAFFS_TRACE_TRACING,
> + "Collecting block %d, in use %d, shrink %d, whole_block %d",
> + block, bi->pages_in_use, bi->has_shrink_hdr,
> + whole_block);
> +
> + /*yaffs_verify_free_chunks(dev); */
> +
> + if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
> + bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
> +
> + bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */
> +
> + dev->gc_disable = 1;
> +
> + if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
> + yaffs_trace(YAFFS_TRACE_TRACING,
> + "Collecting block %d that has no chunks in use",
> + block);
> + yaffs_block_became_dirty(dev, block);
> + } else {
> +
> + u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__);
> +
> + yaffs_verify_blk(dev, bi, block);
> +
> + max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
> + old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
> +
> + for (/* init already done */ ;
> + ret_val == YAFFS_OK &&
> + dev->gc_chunk < dev->param.chunks_per_block &&
> + (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
> + max_copies > 0;
> + dev->gc_chunk++, old_chunk++) {
> + if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
> + /* Page is in use and might need to be copied */
> + max_copies--;
> + ret_val = yaffs_gc_process_chunk(dev, bi,
> + old_chunk, buffer);
> + }
> + }
> + yaffs_release_temp_buffer(dev, buffer, __LINE__);
> + }
> +
> + yaffs_verify_collected_blk(dev, bi, block);
> +
> + if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
> + /*
> + * The gc did not complete. Set block state back to FULL
> + * because checkpointing does not restore gc.
> + */
> + bi->block_state = YAFFS_BLOCK_STATE_FULL;
> + } else {
> + /* The gc completed. */
> + /* Do any required cleanups */
> + for (i = 0; i < dev->n_clean_ups; i++) {
> + /* Time to delete the file too */
> + struct yaffs_obj *object =
> + yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
> + if (object) {
> + yaffs_free_tnode(dev,
> + object->variant.file_variant.top);
> + object->variant.file_variant.top = NULL;
> + yaffs_trace(YAFFS_TRACE_GC,
> + "yaffs: About to finally delete object %d",
> + object->obj_id);
> + yaffs_generic_obj_del(object);
> + object->my_dev->n_deleted_files--;
> + }
> +
> + }
> + chunks_after = yaffs_get_erased_chunks(dev);
> + if (chunks_before >= chunks_after)
> + yaffs_trace(YAFFS_TRACE_GC,
> + "gc did not increase free chunks before %d after %d",
> + chunks_before, chunks_after);
> + dev->gc_block = 0;
> + dev->gc_chunk = 0;
> + dev->n_clean_ups = 0;
> + }
> +
> + dev->gc_disable = 0;
> +
> + return ret_val;
> +}
> +
> +/*
> + * find_gc_block() selects the dirtiest block (or close enough)
> + * for garbage collection.
> + */
> +
> +static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
> + int aggressive, int background)
> +{
> + int i;
> + int iterations;
> + unsigned selected = 0;
> + int prioritised = 0;
> + int prioritised_exist = 0;
> + struct yaffs_block_info *bi;
> + int threshold;
> +
> + /* First let's see if we need to grab a prioritised block */
> + if (dev->has_pending_prioritised_gc && !aggressive) {
> + dev->gc_dirtiest = 0;
> + bi = dev->block_info;
> + for (i = dev->internal_start_block;
> + i <= dev->internal_end_block && !selected; i++) {
> +
> + if (bi->gc_prioritise) {
> + prioritised_exist = 1;
> + if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
> + yaffs_block_ok_for_gc(dev, bi)) {
> + selected = i;
> + prioritised = 1;
> + }
> + }
> + bi++;
> + }
> +
> + /*
> + * If there is a prioritised block and none was selected then
> + * this happened because there is at least one old dirty block
> + * gumming up the works. Let's gc the oldest dirty block.
> + */
> +
> + if (prioritised_exist &&
> + !selected && dev->oldest_dirty_block > 0)
> + selected = dev->oldest_dirty_block;
> +
> + if (!prioritised_exist) /* None found, so we can clear this */
> + dev->has_pending_prioritised_gc = 0;
> + }
> +
> + /* If we're doing aggressive GC then we are happy to take a less-dirty
> + * block, and search harder.
> + * else (leisurely gc), then we only bother to do this if the
> + * block has only a few pages in use.
> + */
> +
> + if (!selected) {
> + int pages_used;
> + int n_blocks =
> + dev->internal_end_block - dev->internal_start_block + 1;
> + if (aggressive) {
> + threshold = dev->param.chunks_per_block;
> + iterations = n_blocks;
> + } else {
> + int max_threshold;
> +
> + if (background)
> + max_threshold = dev->param.chunks_per_block / 2;
> + else
> + max_threshold = dev->param.chunks_per_block / 8;
> +
> + if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
> + max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
> +
> + threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
> + if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
> + threshold = YAFFS_GC_PASSIVE_THRESHOLD;
> + if (threshold > max_threshold)
> + threshold = max_threshold;
> +
> + iterations = n_blocks / 16 + 1;
> + if (iterations > 100)
> + iterations = 100;
> + }
> +
> + for (i = 0;
> + i < iterations &&
> + (dev->gc_dirtiest < 1 ||
> + dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
> + i++) {
> + dev->gc_block_finder++;
> + if (dev->gc_block_finder < dev->internal_start_block ||
> + dev->gc_block_finder > dev->internal_end_block)
> + dev->gc_block_finder =
> + dev->internal_start_block;
> +
> + bi = yaffs_get_block_info(dev, dev->gc_block_finder);
> +
> + pages_used = bi->pages_in_use - bi->soft_del_pages;
> +
> + if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
> + pages_used < dev->param.chunks_per_block &&
> + (dev->gc_dirtiest < 1 ||
> + pages_used < dev->gc_pages_in_use) &&
> + yaffs_block_ok_for_gc(dev, bi)) {
> + dev->gc_dirtiest = dev->gc_block_finder;
> + dev->gc_pages_in_use = pages_used;
> + }
> + }
> +
> + if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
> + selected = dev->gc_dirtiest;
> + }
> +
> + /*
> + * If nothing has been selected for a while, try the oldest dirty
> + * because that's gumming up the works.
> + */
> +
> + if (!selected && dev->param.is_yaffs2 &&
> + dev->gc_not_done >= (background ? 10 : 20)) {
> + yaffs2_find_oldest_dirty_seq(dev);
> + if (dev->oldest_dirty_block > 0) {
> + selected = dev->oldest_dirty_block;
> + dev->gc_dirtiest = selected;
> + dev->oldest_dirty_gc_count++;
> + bi = yaffs_get_block_info(dev, selected);
> + dev->gc_pages_in_use =
> + bi->pages_in_use - bi->soft_del_pages;
> + } else {
> + dev->gc_not_done = 0;
> + }
> + }
> +
> + if (selected) {
> + yaffs_trace(YAFFS_TRACE_GC,
> + "GC Selected block %d with %d free, prioritised:%d",
> + selected,
> + dev->param.chunks_per_block - dev->gc_pages_in_use,
> + prioritised);
> +
> + dev->n_gc_blocks++;
> + if (background)
> + dev->bg_gcs++;
> +
> + dev->gc_dirtiest = 0;
> + dev->gc_pages_in_use = 0;
> + dev->gc_not_done = 0;
> + if (dev->refresh_skip > 0)
> + dev->refresh_skip--;
> + } else {
> + dev->gc_not_done++;
> + yaffs_trace(YAFFS_TRACE_GC,
> + "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
> + dev->gc_block_finder, dev->gc_not_done, threshold,
> + dev->gc_dirtiest, dev->gc_pages_in_use,
> + dev->oldest_dirty_block, background ? " bg" : "");
> + }
> +
> + return selected;
> +}
> +
> +/* New garbage collector
> + * If we're very low on erased blocks then we do aggressive garbage collection
> + * otherwise we do "leisurely" garbage collection.
> + * Aggressive gc looks further (whole array) and will accept less-dirty blocks.
> + * Passive gc only inspects smaller areas and only accepts dirtier blocks.
> + *
> + * The idea is to help clear out space in a more spread-out manner.
> + * Dunno if it really does anything useful.
> + */
> +static int yaffs_check_gc(struct yaffs_dev *dev, int background)
> +{
> + int aggressive = 0;
> + int gc_ok = YAFFS_OK;
> + int max_tries = 0;
> + int min_erased;
> + int erased_chunks;
> + int checkpt_block_adjust;
> +
> + if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
> + return YAFFS_OK;
> +
> + if (dev->gc_disable)
> + /* Bail out so we don't get recursive gc */
> + return YAFFS_OK;
> +
> + /* This loop should pass the first time.
> + * Only loops here if the collection does not increase space.
> + */
> +
> + do {
> + max_tries++;
> +
> + checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
> +
> + min_erased =
> + dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
> + erased_chunks =
> + dev->n_erased_blocks * dev->param.chunks_per_block;
> +
> + /* If we need a block soon then do aggressive gc. */
> + if (dev->n_erased_blocks < min_erased)
> + aggressive = 1;
> + else {
> + if (!background
> + && erased_chunks > (dev->n_free_chunks / 4))
> + break;
> +
> + if (dev->gc_skip > 20)
> + dev->gc_skip = 20;
> + if (erased_chunks < dev->n_free_chunks / 2 ||
> + dev->gc_skip < 1 || background)
> + aggressive = 0;
> + else {
> + dev->gc_skip--;
> + break;
> + }
> + }
> +
> + dev->gc_skip = 5;
> +
> + /* If we don't already have a block being gc'd then see if we
> + * should start another */
> +
> + if (dev->gc_block < 1 && !aggressive) {
> + dev->gc_block = yaffs2_find_refresh_block(dev);
> + dev->gc_chunk = 0;
> + dev->n_clean_ups = 0;
> + }
> + if (dev->gc_block < 1) {
> + dev->gc_block =
> + yaffs_find_gc_block(dev, aggressive, background);
> + dev->gc_chunk = 0;
> + dev->n_clean_ups = 0;
> + }
> +
> + if (dev->gc_block > 0) {
> + dev->all_gcs++;
> + if (!aggressive)
> + dev->passive_gc_count++;
> +
> + yaffs_trace(YAFFS_TRACE_GC,
> + "yaffs: GC n_erased_blocks %d aggressive %d",
> + dev->n_erased_blocks, aggressive);
> +
> + gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
> + }
> +
> + if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
> + dev->gc_block > 0) {
> + yaffs_trace(YAFFS_TRACE_GC,
> + "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
> + dev->n_erased_blocks, max_tries,
> + dev->gc_block);
> + }
> + } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
> + (dev->gc_block > 0) && (max_tries < 2));
> +
> + return aggressive ? gc_ok : YAFFS_OK;
> +}
> +
> +/*
> + * yaffs_bg_gc()
> + * Garbage collects. Intended to be called from a background thread.
> + * Returns non-zero if at least half the free chunks are erased.
> + */
> +int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
> +{
> + int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
> +
> + yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
> +
> + yaffs_check_gc(dev, 1);
> + return erased_chunks > dev->n_free_chunks / 2;
> +}
> +
> +/*-------------------- Data file manipulation -----------------*/
> +
> +static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
> +{
> + int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
> +
> + if (nand_chunk >= 0)
> + return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
> + buffer, NULL);
> + else {
> + yaffs_trace(YAFFS_TRACE_NANDACCESS,
> + "Chunk %d not found zero instead",
> + nand_chunk);
> + /* get sane (zero) data if you read a hole */
> + memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
> + return 0;
> + }
> +
> +}
> +
> +void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
> + int lyn)
> +{
> + int block;
> + int page;
> + struct yaffs_ext_tags tags;
> + struct yaffs_block_info *bi;
> +
> + if (chunk_id <= 0)
> + return;
> +
> + dev->n_deletions++;
> + block = chunk_id / dev->param.chunks_per_block;
> + page = chunk_id % dev->param.chunks_per_block;
> +
> + if (!yaffs_check_chunk_bit(dev, block, page))
> + yaffs_trace(YAFFS_TRACE_VERIFY,
> + "Deleting invalid chunk %d", chunk_id);
> +
> + bi = yaffs_get_block_info(dev, block);
> +
> + yaffs2_update_oldest_dirty_seq(dev, block, bi);
> +
> + yaffs_trace(YAFFS_TRACE_DELETION,
> + "line %d delete of chunk %d",
> + lyn, chunk_id);
> +
> + if (!dev->param.is_yaffs2 && mark_flash &&
> + bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
> +
> + yaffs_init_tags(&tags);
> + tags.is_deleted = 1;
> + yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
> + yaffs_handle_chunk_update(dev, chunk_id, &tags);
> + } else {
> + dev->n_unmarked_deletions++;
> + }
> +
> + /* Pull out of the management area.
> + * If the whole block became dirty, this will kick off an erasure.
> + */
> + if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
> + bi->block_state == YAFFS_BLOCK_STATE_FULL ||
> + bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
> + bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
> + dev->n_free_chunks++;
> + yaffs_clear_chunk_bit(dev, block, page);
> + bi->pages_in_use--;
> +
> + if (bi->pages_in_use == 0 &&
> + !bi->has_shrink_hdr &&
> + bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
> + bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
> + yaffs_block_became_dirty(dev, block);
> + }
> + }
> +}
> +
> +static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
> + const u8 *buffer, int n_bytes, int use_reserve)
> +{
> + /* Find old chunk. Need to do this to get the serial number.
> + * Write new one and patch into tree.
> + * Invalidate old tags.
> + */
> +
> + int prev_chunk_id;
> + struct yaffs_ext_tags prev_tags;
> + int new_chunk_id;
> + struct yaffs_ext_tags new_tags;
> + struct yaffs_dev *dev = in->my_dev;
> +
> + yaffs_check_gc(dev, 0);
> +
> + /* Get the previous chunk at this location in the file if it exists.
> + * If it does not exist then put a zero into the tree. This creates
> + * the tnode now, rather than later when it is harder to clean up.
> + */
> + prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
> + if (prev_chunk_id < 1 &&
> + !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
> + return 0;
> +
> + /* Set up new tags */
> + yaffs_init_tags(&new_tags);
> +
> + new_tags.chunk_id = inode_chunk;
> + new_tags.obj_id = in->obj_id;
> + new_tags.serial_number =
> + (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
> + new_tags.n_bytes = n_bytes;
> +
> + if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "Writing %d bytes to chunk!!!!!!!!!",
> + n_bytes);
> + BUG();
> + }
> +
> + new_chunk_id =
> + yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
> +
> + if (new_chunk_id > 0) {
> + yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
> +
> + if (prev_chunk_id > 0)
> + yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
> +
> + yaffs_verify_file_sane(in);
> + }
> + return new_chunk_id;
> +
> +}
> +
> +
> +
> +static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
> + const YCHAR *name, const void *value, int size,
> + int flags)
> +{
> + struct yaffs_xattr_mod xmod;
> + int result;
> +
> + xmod.set = set;
> + xmod.name = name;
> + xmod.data = value;
> + xmod.size = size;
> + xmod.flags = flags;
> + xmod.result = -ENOSPC;
> +
> + result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
> +
> + if (result > 0)
> + return xmod.result;
> + else
> + return -ENOSPC;
> +}
> +
> +static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
> + struct yaffs_xattr_mod *xmod)
> +{
> + int retval = 0;
> + int x_offs = sizeof(struct yaffs_obj_hdr);
> + struct yaffs_dev *dev = obj->my_dev;
> + int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
> + char *x_buffer = buffer + x_offs;
> +
> + if (xmod->set)
> + retval =
> + nval_set(x_buffer, x_size, xmod->name, xmod->data,
> + xmod->size, xmod->flags);
> + else
> + retval = nval_del(x_buffer, x_size, xmod->name);
> +
> + obj->has_xattr = nval_hasvalues(x_buffer, x_size);
> + obj->xattr_known = 1;
> + xmod->result = retval;
> +
> + return retval;
> +}
> +
> +static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
> + void *value, int size)
> +{
> + char *buffer = NULL;
> + int result;
> + struct yaffs_ext_tags tags;
> + struct yaffs_dev *dev = obj->my_dev;
> + int x_offs = sizeof(struct yaffs_obj_hdr);
> + int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
> + char *x_buffer;
> + int retval = 0;
> +
> + if (obj->hdr_chunk < 1)
> + return -ENODATA;
> +
> + /* If we know that the object has no xattribs then don't do all the
> + * reading and parsing.
> + */
> + if (obj->xattr_known && !obj->has_xattr) {
> + if (name)
> + return -ENODATA;
> + else
> + return 0;
> + }
> +
> + buffer = (char *)yaffs_get_temp_buffer(dev, __LINE__);

Possibly yaffs_get_temp_buffer should return void * to avoid this sort
of casting.
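
i.e. just (sketch of the signature change, nothing else):

	void *yaffs_get_temp_buffer(struct yaffs_dev *dev, int line_no);
	void yaffs_release_temp_buffer(struct yaffs_dev *dev, void *buffer,
				       int line_no);

so that callers like this one can drop the (char *)/(u8 *) casts in both
directions.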

> + if (!buffer)
> + return -ENOMEM;
> +
> + result =
> + yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);

But then you cast it back to u8 *?

> +
> + if (result != YAFFS_OK)
> + retval = -ENOENT;
> + else {
> + x_buffer = buffer + x_offs;
> +
> + if (!obj->xattr_known) {
> + obj->has_xattr = nval_hasvalues(x_buffer, x_size);
> + obj->xattr_known = 1;
> + }
> +
> + if (name)
> + retval = nval_get(x_buffer, x_size, name, value, size);
> + else
> + retval = nval_list(x_buffer, x_size, value, size);
> + }
> + yaffs_release_temp_buffer(dev, (u8 *) buffer, __LINE__);
> + return retval;
> +}
> +
> +int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
> + const void *value, int size, int flags)
> +{
> + return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
> +}
> +
> +int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
> +{
> + return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
> +}
> +
> +int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
> + int size)
> +{
> + return yaffs_do_xattrib_fetch(obj, name, value, size);
> +}
> +
> +int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
> +{
> + return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
> +}
> +
> +static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
> +{
> + u8 *buf;
> + struct yaffs_obj_hdr *oh;
> + struct yaffs_dev *dev;
> + struct yaffs_ext_tags tags;
> + int result;
> + int alloc_failed = 0;
> +
> + if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
> + return;
> +
> + dev = in->my_dev;
> + in->lazy_loaded = 0;
> + buf = yaffs_get_temp_buffer(dev, __LINE__);
> +
> + result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);

result is unused.
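
If the read status actually matters here it should probably be checked,
e.g. (sketch):

	result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
	if (result != YAFFS_OK) {
		yaffs_release_temp_buffer(dev, buf, __LINE__);
		return;
	}

otherwise just drop the variable.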

> + oh = (struct yaffs_obj_hdr *)buf;
> +
> + in->yst_mode = oh->yst_mode;
> + yaffs_load_attribs(in, oh);
> + yaffs_set_obj_name_from_oh(in, oh);
> +
> + if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
> + in->variant.symlink_variant.alias =
> + yaffs_clone_str(oh->alias);
> + if (!in->variant.symlink_variant.alias)
> + alloc_failed = 1; /* Not returned */
> + }
> + yaffs_release_temp_buffer(dev, buf, __LINE__);
> +}
> +
> +static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
> + const YCHAR *oh_name, int buff_size)
> +{
> +#ifdef CONFIG_YAFFS_AUTO_UNICODE

Remove this ifdef, it is not configurable.

> + if (dev->param.auto_unicode) {
> + if (*oh_name) {
> + /* It is an ASCII name, do an ASCII to
> + * unicode conversion */
> + const char *ascii_oh_name = (const char *)oh_name;
> + int n = buff_size - 1;
> + while (n > 0 && *ascii_oh_name) {
> + *name = *ascii_oh_name;
> + name++;
> + ascii_oh_name++;
> + n--;
> + }
> + } else {
> + strncpy(name, oh_name + 1, buff_size - 1);
> + }
> + } else {
> +#else
> + {
> +#endif
> + strncpy(name, oh_name, buff_size - 1);
> + }
> +}
> +
> +static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
> + const YCHAR *name)
> +{
> +#ifdef CONFIG_YAFFS_AUTO_UNICODE

Same again.

> +
> + int is_ascii;
> + YCHAR *w;
> +
> + if (dev->param.auto_unicode) {
> +
> + is_ascii = 1;
> + w = name;
> +
> + /* Figure out if the name will fit in ascii character set */
> + while (is_ascii && *w) {
> + if ((*w) & 0xff00)
> + is_ascii = 0;
> + w++;
> + }
> +
> + if (is_ascii) {
> + /* It is an ASCII name, so convert unicode to ascii */
> + char *ascii_oh_name = (char *)oh_name;
> + int n = YAFFS_MAX_NAME_LENGTH - 1;
> + while (n > 0 && *name) {
> + *ascii_oh_name = *name;
> + name++;
> + ascii_oh_name++;
> + n--;
> + }
> + } else {
> + /* Unicode name, so save starting at the second YCHAR */
> + *oh_name = 0;
> + strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
> + }
> + } else {
> +#else
> + {
> +#endif
> + strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
> + }
> +}
> +
> +/* UpdateObjectHeader updates the header on NAND for an object.

Old name in comment.

> + * If name is not NULL, then that new name is used.
> + */
> +int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
> + int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
> +{
> +
> + struct yaffs_block_info *bi;
> + struct yaffs_dev *dev = in->my_dev;
> + int prev_chunk_id;
> + int ret_val = 0;
> + int result = 0;
> + int new_chunk_id;
> + struct yaffs_ext_tags new_tags;
> + struct yaffs_ext_tags old_tags;
> + const YCHAR *alias = NULL;
> + u8 *buffer = NULL;
> + YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
> + struct yaffs_obj_hdr *oh = NULL;
> +
> + strcpy(old_name, _Y("silly old name"));
> +
> + if (in->fake && in != dev->root_dir && !force && !xmod)
> + return ret_val;
> +
> + yaffs_check_gc(dev, 0);
> + yaffs_check_obj_details_loaded(in);
> +
> + buffer = yaffs_get_temp_buffer(in->my_dev, __LINE__);
> + oh = (struct yaffs_obj_hdr *)buffer;
> +
> + prev_chunk_id = in->hdr_chunk;
> +
> + if (prev_chunk_id > 0) {
> + result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
> + buffer, &old_tags);
> +
> + yaffs_verify_oh(in, oh, &old_tags, 0);
> + memcpy(old_name, oh->name, sizeof(oh->name));
> + memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
> + } else {
> + memset(buffer, 0xff, dev->data_bytes_per_chunk);
> + }
> +
> + oh->type = in->variant_type;
> + oh->yst_mode = in->yst_mode;
> + oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
> +
> + yaffs_load_attribs_oh(oh, in);
> +
> + if (in->parent)
> + oh->parent_obj_id = in->parent->obj_id;
> + else
> + oh->parent_obj_id = 0;
> +
> + if (name && *name) {
> + memset(oh->name, 0, sizeof(oh->name));
> + yaffs_load_oh_from_name(dev, oh->name, name);
> + } else if (prev_chunk_id > 0) {
> + memcpy(oh->name, old_name, sizeof(oh->name));
> + } else {
> + memset(oh->name, 0, sizeof(oh->name));
> + }
> +
> + oh->is_shrink = is_shrink;
> +
> + switch (in->variant_type) {
> + case YAFFS_OBJECT_TYPE_UNKNOWN:
> + /* Should not happen */
> + break;
> + case YAFFS_OBJECT_TYPE_FILE:
> + oh->file_size =
> + (oh->parent_obj_id == YAFFS_OBJECTID_DELETED ||
> + oh->parent_obj_id == YAFFS_OBJECTID_UNLINKED) ?
> + 0 : in->variant.file_variant.file_size;
> + break;
> + case YAFFS_OBJECT_TYPE_HARDLINK:
> + oh->equiv_id = in->variant.hardlink_variant.equiv_id;
> + break;
> + case YAFFS_OBJECT_TYPE_SPECIAL:
> + /* Do nothing */
> + break;
> + case YAFFS_OBJECT_TYPE_DIRECTORY:
> + /* Do nothing */
> + break;
> + case YAFFS_OBJECT_TYPE_SYMLINK:
> + alias = in->variant.symlink_variant.alias;
> + if (!alias)
> + alias = _Y("no alias");
> + strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
> + oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
> + break;
> + }
> +
> + /* process any xattrib modifications */
> + if (xmod)
> + yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);

The casting between u8 * and char * in this file is a bit annoying. Is
it possible to minimise this?
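
One option might be to make the nval_* helpers take void * for the
buffer, e.g. (signatures guessed from the call sites in this patch,
sketch only):

	int nval_set(void *buf, int buf_size, const YCHAR *name,
		     const void *data, int size, int flags);
	int nval_del(void *buf, int buf_size, const YCHAR *name);

which, together with a void * temp buffer, would remove most of the
casts.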

> +
> + /* Tags */
> + yaffs_init_tags(&new_tags);
> + in->serial++;
> + new_tags.chunk_id = 0;
> + new_tags.obj_id = in->obj_id;
> + new_tags.serial_number = in->serial;
> +
> + /* Add extra info for file header */
> + new_tags.extra_available = 1;
> + new_tags.extra_parent_id = oh->parent_obj_id;
> + new_tags.extra_length = oh->file_size;
> + new_tags.extra_is_shrink = oh->is_shrink;
> + new_tags.extra_equiv_id = oh->equiv_id;
> + new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
> + new_tags.extra_obj_type = in->variant_type;
> + yaffs_verify_oh(in, oh, &new_tags, 1);
> +
> + /* Create new chunk in NAND */
> + new_chunk_id =
> + yaffs_write_new_chunk(dev, buffer, &new_tags,
> + (prev_chunk_id > 0) ? 1 : 0);
> +
> + if (buffer)
> + yaffs_release_temp_buffer(dev, buffer, __LINE__);
> +
> + if (new_chunk_id < 0)
> + return new_chunk_id;
> +
> + in->hdr_chunk = new_chunk_id;
> +
> + if (prev_chunk_id > 0)
> + yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
> +
> + if (!yaffs_obj_cache_dirty(in))
> + in->dirty = 0;
> +
> + /* If this was a shrink, then mark the block
> + * that the chunk lives on */
> + if (is_shrink) {
> + bi = yaffs_get_block_info(in->my_dev,
> + new_chunk_id /
> + in->my_dev->param.chunks_per_block);
> + bi->has_shrink_hdr = 1;
> + }
> +
> +
> + return new_chunk_id;
> +}
> +
> +/*--------------------- File read/write ------------------------
> + * Read and write have very similar structures.
> + * In general the read/write has three parts to it
> + * An incomplete chunk to start with (if the read/write is not chunk-aligned)
> + * Some complete chunks
> + * An incomplete chunk to end off with
> + *
> + * Curve-balls: the first chunk might also be the last chunk.
> + */
> +
> +int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
> +{
> + int chunk;
> + u32 start;
> + int n_copy;
> + int n = n_bytes;
> + int n_done = 0;
> + struct yaffs_cache *cache;
> + struct yaffs_dev *dev;
> +
> + dev = in->my_dev;
> +
> + while (n > 0) {
> + yaffs_addr_to_chunk(dev, offset, &chunk, &start);
> + chunk++;
> +
> + /* OK now check for the curveball where the start and end are in
> + * the same chunk.
> + */
> + if ((start + n) < dev->data_bytes_per_chunk)
> + n_copy = n;
> + else
> + n_copy = dev->data_bytes_per_chunk - start;
> +
> + cache = yaffs_find_chunk_cache(in, chunk);
> +
> + /* If the chunk is already in the cache or it is less than
> + * a whole chunk or we're using inband tags then use the cache
> + * (if there is caching) else bypass the cache.
> + */
> + if (cache || n_copy != dev->data_bytes_per_chunk ||
> + dev->param.inband_tags) {
> + if (dev->param.n_caches > 0) {
> +
> + /* If we can't find the data in the cache,
> + * then load it up. */
> +
> + if (!cache) {
> + cache =
> + yaffs_grab_chunk_cache(in->my_dev);
> + cache->object = in;
> + cache->chunk_id = chunk;
> + cache->dirty = 0;
> + cache->locked = 0;
> + yaffs_rd_data_obj(in, chunk,
> + cache->data);
> + cache->n_bytes = 0;
> + }
> +
> + yaffs_use_cache(dev, cache, 0);
> +
> + cache->locked = 1;
> +
> + memcpy(buffer, &cache->data[start], n_copy);
> +
> + cache->locked = 0;

This looks a bit odd. If cache->locked is meant to be protecting
something then it should either be using atomic ops (and appropriate
barriers) or a proper lock. Are we potentially running in parallel here
with something else which could be checking cache->locked? If so, this
code is probably not safe. If not, toggling cache->locked is not
particularly useful.
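
If there genuinely is concurrent access, the sort of thing needed would
look more like this (sketch only; the bit name and the flags field are
made up, they don't exist in the patch):

	while (test_and_set_bit(YAFFS_CACHE_LOCKED, &cache->flags))
		cpu_relax();	/* or sleep on a proper lock */

	memcpy(buffer, &cache->data[start], n_copy);

	clear_bit_unlock(YAFFS_CACHE_LOCKED, &cache->flags);

If everything here already runs under the fs-wide lock then the locked
field can probably just be removed instead.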

> + } else {
> + /* Read into the local buffer then copy.. */
> +
> + u8 *local_buffer =
> + yaffs_get_temp_buffer(dev, __LINE__);
> + yaffs_rd_data_obj(in, chunk, local_buffer);
> +
> + memcpy(buffer, &local_buffer[start], n_copy);
> +
> + yaffs_release_temp_buffer(dev, local_buffer,
> + __LINE__);
> + }
> + } else {
> + /* A full chunk. Read directly into the buffer. */
> + yaffs_rd_data_obj(in, chunk, buffer);
> + }
> + n -= n_copy;
> + offset += n_copy;
> + buffer += n_copy;
> + n_done += n_copy;
> + }
> + return n_done;
> +}
> +
> +int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
> + int n_bytes, int write_trhrough)
> +{
> +
> + int chunk;
> + u32 start;
> + int n_copy;
> + int n = n_bytes;
> + int n_done = 0;
> + int n_writeback;
> + int start_write = offset;
> + int chunk_written = 0;
> + u32 n_bytes_read;
> + u32 chunk_start;
> + struct yaffs_dev *dev;
> +
> + dev = in->my_dev;
> +
> + while (n > 0 && chunk_written >= 0) {
> + yaffs_addr_to_chunk(dev, offset, &chunk, &start);
> +
> + if (chunk * dev->data_bytes_per_chunk + start != offset ||
> + start >= dev->data_bytes_per_chunk) {
> + yaffs_trace(YAFFS_TRACE_ERROR,
> + "AddrToChunk of offset %d gives chunk %d start %d",
> + (int)offset, chunk, start);
> + }
> + chunk++; /* File pos to chunk in file offset */
> +
> + /* OK now check for the curveball where the start and end are in
> + * the same chunk.
> + */
> +
> + if ((start + n) < dev->data_bytes_per_chunk) {
> + n_copy = n;
> +
> + /* Now calculate how many bytes to write back....
> + * If we're overwriting and not writing to the end of the
> + * file then we need to write back as much as was there
> + * before.
> + */
> +
> + chunk_start = ((chunk - 1) * dev->data_bytes_per_chunk);
> +
> + if (chunk_start > in->variant.file_variant.file_size)
> + n_bytes_read = 0; /* Past end of file */
> + else
> + n_bytes_read =
> + in->variant.file_variant.file_size -
> + chunk_start;
> +
> + if (n_bytes_read > dev->data_bytes_per_chunk)
> + n_bytes_read = dev->data_bytes_per_chunk;
> +
> + n_writeback =
> + (n_bytes_read >
> + (start + n)) ? n_bytes_read : (start + n);
> +
> + if (n_writeback < 0 ||
> + n_writeback > dev->data_bytes_per_chunk)
> + BUG();
> +
> + } else {
> + n_copy = dev->data_bytes_per_chunk - start;
> + n_writeback = dev->data_bytes_per_chunk;
> + }
> +
> + if (n_copy != dev->data_bytes_per_chunk ||
> + dev->param.inband_tags) {
> + /* An incomplete start or end chunk (or maybe both
> + * start and end chunk), or we're using inband tags,
> + * so we want to use the cache buffers.
> + */
> + if (dev->param.n_caches > 0) {
> + struct yaffs_cache *cache;
> +
> + /* If we can't find the data in the cache, then
> + * load the cache */
> + cache = yaffs_find_chunk_cache(in, chunk);
> +
> + if (!cache &&
> + yaffs_check_alloc_available(dev, 1)) {
> + cache = yaffs_grab_chunk_cache(dev);
> + cache->object = in;
> + cache->chunk_id = chunk;
> + cache->dirty = 0;
> + cache->locked = 0;
> + yaffs_rd_data_obj(in, chunk,
> + cache->data);
> + } else if (cache &&
> + !cache->dirty &&
> + !yaffs_check_alloc_available(dev,
> + 1)) {
> + /* Drop the cache if it was a read cache
> + * item and no space check has been made
> + * for it.
> + */
> + cache = NULL;
> + }
> +
> + if (cache) {
> + yaffs_use_cache(dev, cache, 1);
> + cache->locked = 1;
> +
> + memcpy(&cache->data[start], buffer,
> + n_copy);
> +
> + cache->locked = 0;
> + cache->n_bytes = n_writeback;
> +
> + if (write_trhrough) {
> + chunk_written =
> + yaffs_wr_data_obj
> + (cache->object,
> + cache->chunk_id,
> + cache->data,
> + cache->n_bytes, 1);
> + cache->dirty = 0;
> + }
> + } else {
> + chunk_written = -1; /* fail write */
> + }
> + } else {
> + /* An incomplete start or end chunk (or maybe
> + * both start and end chunk). Read into the
> + * local buffer then copy over and write back.
> + */
> +
> + u8 *local_buffer =
> + yaffs_get_temp_buffer(dev, __LINE__);
> +
> + yaffs_rd_data_obj(in, chunk, local_buffer);
> + memcpy(&local_buffer[start], buffer, n_copy);
> +
> + chunk_written =
> + yaffs_wr_data_obj(in, chunk,
> + local_buffer,
> + n_writeback, 0);
> +
> + yaffs_release_temp_buffer(dev, local_buffer,
> + __LINE__);
> + }
> + } else {
> + /* A full chunk. Write directly from the buffer. */
> +
> + chunk_written =
> + yaffs_wr_data_obj(in, chunk, buffer,
> + dev->data_bytes_per_chunk, 0);
> +
> + /* Since we've overwritten the cached data,
> + * we better invalidate it. */
> + yaffs_invalidate_chunk_cache(in, chunk);
> + }
> +
> + if (chunk_written >= 0) {
> + n -= n_copy;
> + offset += n_copy;
> + buffer += n_copy;
> + n_done += n_copy;
> + }
> + }
> +
> + /* Update file object */
> +
> + if ((start_write + n_done) > in->variant.file_variant.file_size)
> + in->variant.file_variant.file_size = (start_write + n_done);
> +
> + in->dirty = 1;
> + return n_done;
> +}
> +
> +int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
> + int n_bytes, int write_trhrough)
> +{
> + yaffs2_handle_hole(in, offset);
> + return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_trhrough);
> +}
> +
> +/* ---------------------- File resizing stuff ------------------ */
> +
> +static void yaffs_prune_chunks(struct yaffs_obj *in, int new_size)
> +{
> +
> + struct yaffs_dev *dev = in->my_dev;
> + int old_size = in->variant.file_variant.file_size;
> + int i;
> + int chunk_id;
> + int last_del = 1 + (old_size - 1) / dev->data_bytes_per_chunk;
> + int start_del = 1 + (new_size + dev->data_bytes_per_chunk - 1) /
> + dev->data_bytes_per_chunk;
> +
> +
> + /* Delete backwards so that we don't end up with holes if
> + * power is lost part-way through the operation.
> + */
> + for (i = last_del; i >= start_del; i--) {
> + /* NB this could be optimised somewhat,
> + * eg. could retrieve the tags and write them without
> + * using yaffs_chunk_del
> + */
> +
> + chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
> +
> + if (chunk_id < 1)
> + continue;
> +
> + if (chunk_id <
> + (dev->internal_start_block * dev->param.chunks_per_block) ||
> + chunk_id >=
> + ((dev->internal_end_block + 1) *
> + dev->param.chunks_per_block)) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "Found daft chunk_id %d for %d",
> + chunk_id, i);
> + } else {
> + in->n_data_chunks--;
> + yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
> + }
> + }
> +}
> +
> +void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
> +{
> + int new_full;
> + u32 new_partial;
> + struct yaffs_dev *dev = obj->my_dev;
> +
> + yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
> +
> + yaffs_prune_chunks(obj, new_size);
> +
> + if (new_partial != 0) {
> + int last_chunk = 1 + new_full;
> + u8 *local_buffer = yaffs_get_temp_buffer(dev, __LINE__);
> +
> + /* Rewrite the last chunk with its new size and zero pad */
> + yaffs_rd_data_obj(obj, last_chunk, local_buffer);
> + memset(local_buffer + new_partial, 0,
> + dev->data_bytes_per_chunk - new_partial);
> +
> + yaffs_wr_data_obj(obj, last_chunk, local_buffer,
> + new_partial, 1);
> +
> + yaffs_release_temp_buffer(dev, local_buffer, __LINE__);
> + }
> +
> + obj->variant.file_variant.file_size = new_size;
> +
> + yaffs_prune_tree(dev, &obj->variant.file_variant);
> +}
> +
> +int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
> +{
> + struct yaffs_dev *dev = in->my_dev;
> + int old_size = in->variant.file_variant.file_size;
> +
> + yaffs_flush_file_cache(in);
> + yaffs_invalidate_whole_cache(in);
> +
> + yaffs_check_gc(dev, 0);
> +
> + if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
> + return YAFFS_FAIL;
> +
> + if (new_size == old_size)
> + return YAFFS_OK;
> +
> + if (new_size > old_size) {
> + yaffs2_handle_hole(in, new_size);
> + in->variant.file_variant.file_size = new_size;
> + } else {
> + /* new_size < old_size */
> + yaffs_resize_file_down(in, new_size);
> + }
> +
> + /* Write a new object header to reflect the resize.
> + * show we've shrunk the file, if need be
> + * Do this only if the file is not in the deleted directories
> + * and is not shadowed.
> + */
> + if (in->parent &&
> + !in->is_shadowed &&
> + in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
> + in->parent->obj_id != YAFFS_OBJECTID_DELETED)
> + yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
> +
> + return YAFFS_OK;
> +}
> +
> +int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
> +{
> + if (!in->dirty)
> + return YAFFS_OK;
> +
> + yaffs_flush_file_cache(in);
> +
> + if (data_sync)
> + return YAFFS_OK;
> +
> + if (update_time)
> + yaffs_load_current_time(in, 0, 0);
> +
> + return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
> + YAFFS_OK : YAFFS_FAIL;
> +}
> +
> +
> +/* yaffs_del_file deletes the whole file data
> + * and the inode associated with the file.
> + * It does not delete the links associated with the file.
> + */
> +static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
> +{
> + int ret_val;
> + int del_now = 0;
> + struct yaffs_dev *dev = in->my_dev;
> +
> + if (!in->my_inode)
> + del_now = 1;
> +
> + if (del_now) {

You don't need the del_now intermediate. Just check !in->my_inode directly.

> + ret_val =
> + yaffs_change_obj_name(in, in->my_dev->del_dir,
> + _Y("deleted"), 0, 0);
> + yaffs_trace(YAFFS_TRACE_TRACING,
> + "yaffs: immediate deletion of file %d",
> + in->obj_id);
> + in->deleted = 1;
> + in->my_dev->n_deleted_files++;
> + if (dev->param.disable_soft_del || dev->param.is_yaffs2)
> + yaffs_resize_file(in, 0);
> + yaffs_soft_del_file(in);
> + } else {
> + ret_val =
> + yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
> + _Y("unlinked"), 0, 0);
> + }
> + return ret_val;
> +}
> +
> +int yaffs_del_file(struct yaffs_obj *in)
> +{
> + int ret_val = YAFFS_OK;
> + int deleted; /* Need to cache value on stack if in is freed */
> + struct yaffs_dev *dev = in->my_dev;
> +
> + if (dev->param.disable_soft_del || dev->param.is_yaffs2)
> + yaffs_resize_file(in, 0);
> +
> + if (in->n_data_chunks > 0) {
> + /* Use soft deletion if there is data in the file.
> + * That won't be the case if it has been resized to zero.
> + */
> + if (!in->unlinked)
> + ret_val = yaffs_unlink_file_if_needed(in);
> +
> + deleted = in->deleted;
> +
> + if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
> + in->deleted = 1;
> + deleted = 1;
> + in->my_dev->n_deleted_files++;
> + yaffs_soft_del_file(in);
> + }
> + return deleted ? YAFFS_OK : YAFFS_FAIL;
> + } else {
> + /* The file has no data chunks so we toss it immediately */
> + yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
> + in->variant.file_variant.top = NULL;
> + yaffs_generic_obj_del(in);
> +
> + return YAFFS_OK;
> + }
> +}
> +
> +int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
> +{
> + return (obj &&
> + obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
> + !(list_empty(&obj->variant.dir_variant.children));
> +}
> +
> +static int yaffs_del_dir(struct yaffs_obj *obj)
> +{
> + /* First check that the directory is empty. */
> + if (yaffs_is_non_empty_dir(obj))
> + return YAFFS_FAIL;
> +
> + return yaffs_generic_obj_del(obj);
> +}
> +
> +static int yaffs_del_symlink(struct yaffs_obj *in)
> +{
> + kfree(in->variant.symlink_variant.alias);
> + in->variant.symlink_variant.alias = NULL;
> +
> + return yaffs_generic_obj_del(in);
> +}
> +
> +static int yaffs_del_link(struct yaffs_obj *in)
> +{
> + /* remove this hardlink from the list associated with the equivalent
> + * object
> + */
> + list_del_init(&in->hard_links);
> + return yaffs_generic_obj_del(in);
> +}
> +
> +int yaffs_del_obj(struct yaffs_obj *obj)
> +{
> + int ret_val = -1;
> +
> + switch (obj->variant_type) {
> + case YAFFS_OBJECT_TYPE_FILE:
> + ret_val = yaffs_del_file(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_DIRECTORY:
> + if (!list_empty(&obj->variant.dir_variant.dirty)) {
> + yaffs_trace(YAFFS_TRACE_BACKGROUND,
> + "Remove object %d from dirty directories",
> + obj->obj_id);
> + list_del_init(&obj->variant.dir_variant.dirty);
> + }
> + return yaffs_del_dir(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_SYMLINK:
> + ret_val = yaffs_del_symlink(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_HARDLINK:
> + ret_val = yaffs_del_link(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_SPECIAL:
> + ret_val = yaffs_generic_obj_del(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_UNKNOWN:
> + ret_val = 0;
> + break; /* should not happen. */
> + }
> + return ret_val;
> +}
> +
> +static int yaffs_unlink_worker(struct yaffs_obj *obj)
> +{
> + int del_now = 0;
> +
> + if (!obj)
> + return YAFFS_FAIL;
> +
> + if (!obj->my_inode)
> + del_now = 1;
> +
> + yaffs_update_parent(obj->parent);
> +
> + if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
> + return yaffs_del_link(obj);
> + } else if (!list_empty(&obj->hard_links)) {
> + /* Curve ball: We're unlinking an object that has a hardlink.
> + *
> + * This problem arises because we are not strictly following
> + * the Linux link/inode model.
> + *
> + * We can't really delete the object.
> + * Instead, we do the following:
> + * - Select a hardlink.
> + * - Unhook it from the hard links
> + * - Move it from its parent directory so that the rename works.
> + * - Rename the object to the hardlink's name.
> + * - Delete the hardlink
> + */
> +
> + struct yaffs_obj *hl;
> + struct yaffs_obj *parent;
> + int ret_val;
> + YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
> +
> + hl = list_entry(obj->hard_links.next, struct yaffs_obj,
> + hard_links);
> +
> + yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
> + parent = hl->parent;
> +
> + list_del_init(&hl->hard_links);
> +
> + yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
> +
> + ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
> +
> + if (ret_val == YAFFS_OK)
> + ret_val = yaffs_generic_obj_del(hl);
> +
> + return ret_val;
> +
> + } else if (del_now) {
> + switch (obj->variant_type) {
> + case YAFFS_OBJECT_TYPE_FILE:
> + return yaffs_del_file(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_DIRECTORY:
> + list_del_init(&obj->variant.dir_variant.dirty);
> + return yaffs_del_dir(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_SYMLINK:
> + return yaffs_del_symlink(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_SPECIAL:
> + return yaffs_generic_obj_del(obj);
> + break;
> + case YAFFS_OBJECT_TYPE_HARDLINK:
> + case YAFFS_OBJECT_TYPE_UNKNOWN:
> + default:
> + return YAFFS_FAIL;
> + }
> + } else if (yaffs_is_non_empty_dir(obj)) {
> + return YAFFS_FAIL;
> + } else {
> + return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
> + _Y("unlinked"), 0, 0);
> + }
> +}
> +
> +static int yaffs_unlink_obj(struct yaffs_obj *obj)
> +{
> + if (obj && obj->unlink_allowed)
> + return yaffs_unlink_worker(obj);
> +
> + return YAFFS_FAIL;
> +}
> +
> +int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
> +{
> + struct yaffs_obj *obj;
> +
> + obj = yaffs_find_by_name(dir, name);
> + return yaffs_unlink_obj(obj);
> +}
> +
> +/* Note:
> + * If old_name is NULL then we take old_dir as the object to be renamed.
> + */
> +int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
> + struct yaffs_obj *new_dir, const YCHAR *new_name)
> +{
> + struct yaffs_obj *obj = NULL;
> + struct yaffs_obj *existing_target = NULL;
> + int force = 0;
> + int result;
> + struct yaffs_dev *dev;
> +
> + if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
> + BUG();
> + return YAFFS_FAIL;
> + }
> + if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
> + BUG();
> + return YAFFS_FAIL;
> + }
> +
> + dev = old_dir->my_dev;
> +
> +#ifdef CONFIG_YAFFS_CASE_INSENSITIVE

Remove this ifdef, it can't be configured.

> + /* Special case for case-insensitive systems.
> + * While look-up is case insensitive, the name isn't.
> + * Therefore we might want to change x.txt to X.txt
> + */
> + if (old_dir == new_dir &&
> + old_name && new_name &&
> + strcmp(old_name, new_name) == 0)
> + force = 1;
> +#endif
> +
> + if (strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
> + YAFFS_MAX_NAME_LENGTH)
> + /* ENAMETOOLONG */
> + return YAFFS_FAIL;
> +
> + if (old_name)
> + obj = yaffs_find_by_name(old_dir, old_name);
> + else{
> + obj = old_dir;
> + old_dir = obj->parent;
> + }
> +
> + if (obj && obj->rename_allowed) {
> + /* Now handle an existing target, if there is one */
> + existing_target = yaffs_find_by_name(new_dir, new_name);
> + if (yaffs_is_non_empty_dir(existing_target)) {
> + return YAFFS_FAIL; /* ENOTEMPTY */
> + } else if (existing_target && existing_target != obj) {
> + /* Nuke the target first, using shadowing,
> + * but only if it isn't the same object.
> + *
> + * Note we must disable gc here otherwise it can mess
> + * up the shadowing.
> + *
> + */
> + dev->gc_disable = 1;
> + yaffs_change_obj_name(obj, new_dir, new_name, force,
> + existing_target->obj_id);
> + existing_target->is_shadowed = 1;
> + yaffs_unlink_obj(existing_target);
> + dev->gc_disable = 0;
> + }
> +
> + result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
> +
> + yaffs_update_parent(old_dir);
> + if (new_dir != old_dir)
> + yaffs_update_parent(new_dir);
> +
> + return result;
> + }
> + return YAFFS_FAIL;
> +}
> +
> +/*----------------------- Initialisation Scanning ---------------------- */
> +
> +void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
> + int backward_scanning)
> +{
> + struct yaffs_obj *obj;
> +
> + if (backward_scanning) {
> + /* Handle YAFFS2 case (backward scanning)
> + * If the shadowed object exists then ignore.
> + */
> + obj = yaffs_find_by_number(dev, obj_id);
> + if (obj)
> + return;
> + }
> +
> + /* Let's create it (if it does not exist) assuming it is a file so that
> + * it can do shrinking etc.
> + * We put it in unlinked dir to be cleaned up after the scanning
> + */
> + obj =
> + yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
> + if (!obj)
> + return;
> + obj->is_shadowed = 1;
> + yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
> + obj->variant.file_variant.shrink_size = 0;
> + obj->valid = 1; /* So that we don't read any other info. */
> +}
> +
> +void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
> +{
> + struct list_head *lh;
> + struct list_head *save;
> + struct yaffs_obj *hl;
> + struct yaffs_obj *in;
> +
> + list_for_each_safe(lh, save, hard_list) {
> + hl = list_entry(lh, struct yaffs_obj, hard_links);
> + in = yaffs_find_by_number(dev,
> + hl->variant.hardlink_variant.equiv_id);
> +
> + if (in) {
> + /* Add the hardlink pointers */
> + hl->variant.hardlink_variant.equiv_obj = in;
> + list_add(&hl->hard_links, &in->hard_links);
> + } else {
> + /* Todo Need to report/handle this better.
> + * Got a problem... hardlink to a non-existant object
> + */
> + hl->variant.hardlink_variant.equiv_obj = NULL;
> + INIT_LIST_HEAD(&hl->hard_links);
> + }
> + }
> +}
> +
> +static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
> +{
> + /*
> + * Sort out state of unlinked and deleted objects after scanning.
> + */
> + struct list_head *i;
> + struct list_head *n;
> + struct yaffs_obj *l;
> +
> + if (dev->read_only)
> + return;
> +
> + /* Soft delete all the unlinked files */
> + list_for_each_safe(i, n,
> + &dev->unlinked_dir->variant.dir_variant.children) {
> + l = list_entry(i, struct yaffs_obj, siblings);
> + yaffs_del_obj(l);
> + }
> +
> + list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
> + l = list_entry(i, struct yaffs_obj, siblings);
> + yaffs_del_obj(l);
> + }
> +}
> +
> +/*
> + * This code iterates through all the objects making sure that they are rooted.
> + * Any unrooted objects are re-rooted in lost+found.
> + * An object needs to be in one of:
> + * - Directly under deleted, unlinked
> + * - Directly or indirectly under root.
> + *
> + * Note:
> + * This code assumes that we don't ever change the current relationships
> + * between directories:
> + * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
> + * lost-n-found->parent == root_dir
> + *
> + * This fixes the problem where directories might have inadvertently been
> + * deleted leaving the object "hanging" without being rooted in the
> + * directory tree.
> + */
> +
> +static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
> +{
> + return (obj == dev->del_dir ||
> + obj == dev->unlinked_dir || obj == dev->root_dir);
> +}
> +
> +static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
> +{
> + struct yaffs_obj *obj;
> + struct yaffs_obj *parent;
> + int i;
> + struct list_head *lh;
> + struct list_head *n;
> + int depth_limit;
> + int hanging;
> +
> + if (dev->read_only)
> + return;
> +
> + /* Iterate through the objects in each hash entry,
> + * looking at each object.
> + * Make sure it is rooted.
> + */
> +
> + for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
> + list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
> + obj = list_entry(lh, struct yaffs_obj, hash_link);
> + parent = obj->parent;
> +
> + if (yaffs_has_null_parent(dev, obj)) {
> + /* These directories are not hanging */
> + hanging = 0;
> + } else if (!parent ||
> + parent->variant_type !=
> + YAFFS_OBJECT_TYPE_DIRECTORY) {
> + hanging = 1;
> + } else if (yaffs_has_null_parent(dev, parent)) {
> + hanging = 0;
> + } else {
> + /*
> + * Need to follow the parent chain to
> + * see if it is hanging.
> + */
> + hanging = 0;
> + depth_limit = 100;
> +
> + while (parent != dev->root_dir &&
> + parent->parent &&
> + parent->parent->variant_type ==
> + YAFFS_OBJECT_TYPE_DIRECTORY &&
> + depth_limit > 0) {
> + parent = parent->parent;
> + depth_limit--;
> + }
> + if (parent != dev->root_dir)
> + hanging = 1;
> + }
> + if (hanging) {
> + yaffs_trace(YAFFS_TRACE_SCAN,
> + "Hanging object %d moved to lost and found",
> + obj->obj_id);
> + yaffs_add_obj_to_dir(dev->lost_n_found, obj);
> + }
> + }
> + }
> +}
> +
> +/*
> + * Delete directory contents for cleaning up lost and found.
> + */
> +static void yaffs_del_dir_contents(struct yaffs_obj *dir)
> +{
> + struct yaffs_obj *obj;
> + struct list_head *lh;
> + struct list_head *n;
> +
> + if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
> + BUG();
> +
> + list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
> + obj = list_entry(lh, struct yaffs_obj, siblings);
> + if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
> + yaffs_del_dir_contents(obj);
> + yaffs_trace(YAFFS_TRACE_SCAN,
> + "Deleting lost_found object %d",
> + obj->obj_id);
> + yaffs_unlink_obj(obj);
> + }
> +}
> +
> +static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
> +{
> + yaffs_del_dir_contents(dev->lost_n_found);
> +}

I would move this code inline where it is called from. It's quite
obvious what it does and is only used in one place anyway.
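I.e. just call it directly at the call site in yaffs_guts_initialise()
(its only caller), something like:

	if (dev->param.empty_lost_n_found)
		yaffs_del_dir_contents(dev->lost_n_found);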

> +
> +
> +struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
> + const YCHAR *name)
> +{
> + int sum;
> + struct list_head *i;
> + YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
> + struct yaffs_obj *l;
> +
> + if (!name)
> + return NULL;
> +
> + if (!directory) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "tragedy: yaffs_find_by_name: null pointer directory"
> + );
> + BUG();
> + return NULL;
> + }
> + if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "tragedy: yaffs_find_by_name: non-directory"
> + );
> + BUG();
> + }
> +
> + sum = yaffs_calc_name_sum(name);
> +
> + list_for_each(i, &directory->variant.dir_variant.children) {
> + l = list_entry(i, struct yaffs_obj, siblings);
> +
> + if (l->parent != directory)
> + BUG();
> +
> + yaffs_check_obj_details_loaded(l);
> +
> + /* Special case for lost-n-found */
> + if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
> + if (!strcmp(name, YAFFS_LOSTNFOUND_NAME))
> + return l;
> + } else if (l->sum == sum || l->hdr_chunk <= 0) {
> + /* LostnFound chunk called Objxxx
> + * Do a real check
> + */
> + yaffs_get_obj_name(l, buffer,
> + YAFFS_MAX_NAME_LENGTH + 1);
> + if (strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH) == 0)
> + return l;
> + }
> + }
> + return NULL;
> +}
> +
> +/* GetEquivalentObject dereferences any hard links to get to the
> + * actual object.
> + */
> +
> +struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
> +{
> + if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
> + obj = obj->variant.hardlink_variant.equiv_obj;
> + yaffs_check_obj_details_loaded(obj);
> + }
> + return obj;
> +}
> +
> +/*
> + * A note or two on object names.
> + * * If the object name is missing, we then make one up in the form objnnn
> + *
> + * * ASCII names are stored in the object header's name field from byte zero
> + * * Unicode names are historically stored starting from byte zero.
> + *
> + * Then there are automatic Unicode names...
> + * The purpose of these is to save names in a way that can be read as
> + * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
> + * system to share files.
> + *
> + * These automatic unicode are stored slightly differently...
> + * - If the name can fit in the ASCII character space then they are saved as
> + * ascii names as per above.
> + * - If the name needs Unicode then the name is saved in Unicode
> + * starting at oh->name[1].
> +
> + */
> +static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
> + int buffer_size)
> +{
> + /* Create an object name if we could not find one. */
> + if (strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
> + YCHAR local_name[20];
> + YCHAR num_string[20];
> + YCHAR *x = &num_string[19];
> + unsigned v = obj->obj_id;
> + num_string[19] = 0;
> + while (v > 0) {
> + x--;
> + *x = '0' + (v % 10);
> + v /= 10;
> + }
> + /* make up a name */
> + strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
> + strcat(local_name, x);
> + strncpy(name, local_name, buffer_size - 1);
> + }
> +}
> +
> +int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
> +{
> + memset(name, 0, buffer_size * sizeof(YCHAR));
> + yaffs_check_obj_details_loaded(obj);
> + if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND)
> + strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
> +#ifndef CONFIG_YAFFS_NO_SHORT_NAMES

Non-existent Kconfig option.

> + else if (obj->short_name[0])
> + strcpy(name, obj->short_name);
> +#endif
> + else if (obj->hdr_chunk > 0) {
> + int result;
> + u8 *buffer = yaffs_get_temp_buffer(obj->my_dev, __LINE__);
> +
> + struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
> +
> + memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
> +
> + if (obj->hdr_chunk > 0) {
> + result = yaffs_rd_chunk_tags_nand(obj->my_dev,
> + obj->hdr_chunk,
> + buffer, NULL);
> + }
> + yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
> + buffer_size);
> +
> + yaffs_release_temp_buffer(obj->my_dev, buffer, __LINE__);
> + }
> +
> + yaffs_fix_null_name(obj, name, buffer_size);
> +
> + return strnlen(name, YAFFS_MAX_NAME_LENGTH);
> +}
> +
> +int yaffs_get_obj_length(struct yaffs_obj *obj)
> +{
> + /* Dereference any hard linking */
> + obj = yaffs_get_equivalent_obj(obj);
> +
> + if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
> + return obj->variant.file_variant.file_size;
> + if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
> + if (!obj->variant.symlink_variant.alias)
> + return 0;
> + return strnlen(obj->variant.symlink_variant.alias,
> + YAFFS_MAX_ALIAS_LENGTH);
> + } else {
> + /* Only a directory should drop through to here */
> + return obj->my_dev->data_bytes_per_chunk;
> + }
> +}
> +
> +int yaffs_get_obj_link_count(struct yaffs_obj *obj)
> +{
> + int count = 0;
> + struct list_head *i;
> +
> + if (!obj->unlinked)
> + count++; /* the object itself */
> +
> + list_for_each(i, &obj->hard_links)
> + count++; /* add the hard links; */
> +
> + return count;
> +}
> +
> +int yaffs_get_obj_inode(struct yaffs_obj *obj)
> +{
> + obj = yaffs_get_equivalent_obj(obj);
> +
> + return obj->obj_id;
> +}
> +
> +unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
> +{
> + obj = yaffs_get_equivalent_obj(obj);
> +
> + switch (obj->variant_type) {
> + case YAFFS_OBJECT_TYPE_FILE:
> + return DT_REG;
> + break;
> + case YAFFS_OBJECT_TYPE_DIRECTORY:
> + return DT_DIR;
> + break;
> + case YAFFS_OBJECT_TYPE_SYMLINK:
> + return DT_LNK;
> + break;
> + case YAFFS_OBJECT_TYPE_HARDLINK:
> + return DT_REG;
> + break;
> + case YAFFS_OBJECT_TYPE_SPECIAL:
> + if (S_ISFIFO(obj->yst_mode))
> + return DT_FIFO;
> + if (S_ISCHR(obj->yst_mode))
> + return DT_CHR;
> + if (S_ISBLK(obj->yst_mode))
> + return DT_BLK;
> + if (S_ISSOCK(obj->yst_mode))
> + return DT_SOCK;
> + return DT_REG;
> + break;
> + default:
> + return DT_REG;
> + break;
> + }
> +}
> +
> +YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
> +{
> + obj = yaffs_get_equivalent_obj(obj);
> + if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
> + return yaffs_clone_str(obj->variant.symlink_variant.alias);
> + else
> + return yaffs_clone_str(_Y(""));
> +}
> +
> +/*--------------------------- Initialisation code -------------------------- */
> +
> +static int yaffs_check_dev_fns(const struct yaffs_dev *dev)
> +{
> + /* Common functions, gotta have */
> + if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
> + return 0;
> +
> +#ifdef CONFIG_YAFFS_YAFFS2
> +
> + /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
> + if (dev->param.write_chunk_tags_fn &&
> + dev->param.read_chunk_tags_fn &&
> + !dev->param.write_chunk_fn &&
> + !dev->param.read_chunk_fn &&
> + dev->param.bad_block_fn && dev->param.query_block_fn)
> + return 1;
> +#endif
> +
> + /* Can use the "spare" style interface for yaffs1 */
> + if (!dev->param.is_yaffs2 &&
> + !dev->param.write_chunk_tags_fn &&
> + !dev->param.read_chunk_tags_fn &&
> + dev->param.write_chunk_fn &&
> + dev->param.read_chunk_fn &&
> + !dev->param.bad_block_fn && !dev->param.query_block_fn)
> + return 1;
> +
> + return 0; /* bad */
> +}
> +
> +static int yaffs_create_initial_dir(struct yaffs_dev *dev)
> +{
> + /* Initialise the unlinked, deleted, root and lost+found directories */
> + dev->lost_n_found = dev->root_dir = NULL;
> + dev->unlinked_dir = dev->del_dir = NULL;
> + dev->unlinked_dir =
> + yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
> + dev->del_dir =
> + yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
> + dev->root_dir =
> + yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
> + YAFFS_ROOT_MODE | S_IFDIR);
> + dev->lost_n_found =
> + yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
> + YAFFS_LOSTNFOUND_MODE | S_IFDIR);
> +
> + if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
> + && dev->del_dir) {
> + yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
> + return YAFFS_OK;
> + }
> + return YAFFS_FAIL;
> +}
> +
> +int yaffs_guts_initialise(struct yaffs_dev *dev)
> +{
> + int init_failed = 0;
> + unsigned x;
> + int bits;
> +
> + yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()");
> +
> + /* Check stuff that must be set */
> +
> + if (!dev) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "yaffs: Need a device"
> + );
> + return YAFFS_FAIL;
> + }
> +
> + if (dev->is_mounted) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
> + return YAFFS_FAIL;
> + }
> +
> + dev->internal_start_block = dev->param.start_block;
> + dev->internal_end_block = dev->param.end_block;
> + dev->block_offset = 0;
> + dev->chunk_offset = 0;
> + dev->n_free_chunks = 0;
> +
> + dev->gc_block = 0;
> +
> + if (dev->param.start_block == 0) {
> + dev->internal_start_block = dev->param.start_block + 1;
> + dev->internal_end_block = dev->param.end_block + 1;
> + dev->block_offset = 1;
> + dev->chunk_offset = dev->param.chunks_per_block;
> + }
> +
> + /* Check geometry parameters. */
> +
> + if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
> + dev->param.total_bytes_per_chunk < 1024) ||
> + (!dev->param.is_yaffs2 &&
> + dev->param.total_bytes_per_chunk < 512) ||
> + (dev->param.inband_tags && !dev->param.is_yaffs2) ||
> + dev->param.chunks_per_block < 2 ||
> + dev->param.n_reserved_blocks < 2 ||
> + dev->internal_start_block <= 0 ||
> + dev->internal_end_block <= 0 ||
> + dev->internal_end_block <=
> + (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
> + ) {
> + /* otherwise it is too small */
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
> + dev->param.total_bytes_per_chunk,
> + dev->param.is_yaffs2 ? "2" : "",
> + dev->param.inband_tags);
> + return YAFFS_FAIL;
> + }
> +
> + if (yaffs_init_nand(dev) != YAFFS_OK) {
> + yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
> + return YAFFS_FAIL;
> + }
> +
> + /* Sort out space for inband tags, if required */
> + if (dev->param.inband_tags)
> + dev->data_bytes_per_chunk =
> + dev->param.total_bytes_per_chunk -
> + sizeof(struct yaffs_packed_tags2_tags_only);
> + else
> + dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
> +
> + /* Got the right mix of functions? */
> + if (!yaffs_check_dev_fns(dev)) {
> + /* Function missing */
> + yaffs_trace(YAFFS_TRACE_ALWAYS,
> + "device function(s) missing or wrong");
> +
> + return YAFFS_FAIL;
> + }
> +
> + /* Finished with most checks. Further checks happen later on too. */
> +
> + dev->is_mounted = 1;
> +
> + /* OK now calculate a few things for the device */
> +
> + /*
> + * Calculate all the chunk size manipulation numbers:
> + */
> + x = dev->data_bytes_per_chunk;
> + /* We always use dev->chunk_shift and dev->chunk_div */
> + dev->chunk_shift = calc_shifts(x);
> + x >>= dev->chunk_shift;
> + dev->chunk_div = x;
> + /* We only use chunk mask if chunk_div is 1 */
> + dev->chunk_mask = (1 << dev->chunk_shift) - 1;
> +
> + /*
> + * Calculate chunk_grp_bits.
> + * We need to find the next power of 2 > than internal_end_block
> + */
> +
> + x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
> +
> + bits = calc_shifts_ceiling(x);
> +
> + /* Set up tnode width if wide tnodes are enabled. */
> + if (!dev->param.wide_tnodes_disabled) {
> + /* bits must be even so that we end up with 32-bit words */
> + if (bits & 1)
> + bits++;
> + if (bits < 16)
> + dev->tnode_width = 16;
> + else
> + dev->tnode_width = bits;
> + } else {
> + dev->tnode_width = 16;
> + }
> +
> + dev->tnode_mask = (1 << dev->tnode_width) - 1;
> +
> + /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
> + * so if the bitwidth of the
> + * chunk range we're using is greater than 16 we need
> + * to figure out chunk shift and chunk_grp_size
> + */
> +
> + if (bits <= dev->tnode_width)
> + dev->chunk_grp_bits = 0;
> + else
> + dev->chunk_grp_bits = bits - dev->tnode_width;
> +
> + dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
> + if (dev->tnode_size < sizeof(struct yaffs_tnode))
> + dev->tnode_size = sizeof(struct yaffs_tnode);
> +
> + dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
> +
> + if (dev->param.chunks_per_block < dev->chunk_grp_size) {
> + /* We have a problem because the soft delete won't work if
> + * the chunk group size > chunks per block.
> + * This can be remedied by using larger "virtual blocks".
> + */
> + yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
> +
> + return YAFFS_FAIL;
> + }
> +
> + /* Finished verifying the device, continue with initialisation */
> +
> + /* More device initialisation */
> + dev->all_gcs = 0;
> + dev->passive_gc_count = 0;
> + dev->oldest_dirty_gc_count = 0;
> + dev->bg_gcs = 0;
> + dev->gc_block_finder = 0;
> + dev->buffered_block = -1;
> + dev->doing_buffered_block_rewrite = 0;
> + dev->n_deleted_files = 0;
> + dev->n_bg_deletions = 0;
> + dev->n_unlinked_files = 0;
> + dev->n_ecc_fixed = 0;
> + dev->n_ecc_unfixed = 0;
> + dev->n_tags_ecc_fixed = 0;
> + dev->n_tags_ecc_unfixed = 0;
> + dev->n_erase_failures = 0;
> + dev->n_erased_blocks = 0;
> + dev->gc_disable = 0;
> + dev->has_pending_prioritised_gc = 1;
> + /* Assume the worst for now, will get fixed on first GC */
> + INIT_LIST_HEAD(&dev->dirty_dirs);
> + dev->oldest_dirty_seq = 0;
> + dev->oldest_dirty_block = 0;
> +
> + /* Initialise temporary buffers and caches. */
> + if (!yaffs_init_tmp_buffers(dev))
> + init_failed = 1;
> +
> + dev->cache = NULL;
> + dev->gc_cleanup_list = NULL;
> +
> + if (!init_failed && dev->param.n_caches > 0) {
> + int i;
> + void *buf;
> + int cache_bytes =
> + dev->param.n_caches * sizeof(struct yaffs_cache);
> +
> + if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
> + dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
> +
> + dev->cache = kmalloc(cache_bytes, GFP_NOFS);
> +
> + buf = (u8 *) dev->cache;
> +
> + if (dev->cache)
> + memset(dev->cache, 0, cache_bytes);
> +
> + for (i = 0; i < dev->param.n_caches && buf; i++) {
> + dev->cache[i].object = NULL;
> + dev->cache[i].last_use = 0;
> + dev->cache[i].dirty = 0;
> + dev->cache[i].data = buf =
> + kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
> + }
> + if (!buf)
> + init_failed = 1;

This has the same problems as the temporary buffer allocation function.
Probably best to move the cache allocation into its own function and
correctly free any allocated buffers if an allocation fails.

If the init fails we should probably just bail out of this function
instead of having if (!init_failed) everywhere, right?
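
Something like this (completely untested sketch, function name made up)
would keep the allocation and the error cleanup together:

static int yaffs_init_cache(struct yaffs_dev *dev)
{
	int i;

	if (dev->param.n_caches < 1)
		return YAFFS_OK;

	if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
		dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;

	/* kzalloc zeroes object/last_use/dirty for every entry */
	dev->cache = kzalloc(dev->param.n_caches *
			     sizeof(struct yaffs_cache), GFP_NOFS);
	if (!dev->cache)
		return YAFFS_FAIL;

	for (i = 0; i < dev->param.n_caches; i++) {
		dev->cache[i].data =
			kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
		if (!dev->cache[i].data)
			goto err;
	}
	dev->cache_last_use = 0;
	return YAFFS_OK;

err:
	/* Free only what was successfully allocated */
	while (--i >= 0)
		kfree(dev->cache[i].data);
	kfree(dev->cache);
	dev->cache = NULL;
	return YAFFS_FAIL;
}

Then yaffs_guts_initialise() can just return YAFFS_FAIL as soon as this
(or any other allocation step) fails.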

> +
> + dev->cache_last_use = 0;
> + }
> +
> + dev->cache_hits = 0;
> +
> + if (!init_failed) {
> + dev->gc_cleanup_list =
> + kmalloc(dev->param.chunks_per_block * sizeof(u32),
> + GFP_NOFS);
> + if (!dev->gc_cleanup_list)
> + init_failed = 1;
> + }
> +
> + if (dev->param.is_yaffs2)
> + dev->param.use_header_file_size = 1;
> +
> + if (!init_failed && !yaffs_init_blocks(dev))
> + init_failed = 1;
> +
> + yaffs_init_tnodes_and_objs(dev);
> +
> + if (!init_failed && !yaffs_create_initial_dir(dev))
> + init_failed = 1;
> +
> + if (!init_failed) {
> + /* Now scan the flash. */
> + if (dev->param.is_yaffs2) {
> + if (yaffs2_checkpt_restore(dev)) {
> + yaffs_check_obj_details_loaded(dev->root_dir);
> + yaffs_trace(YAFFS_TRACE_CHECKPOINT |
> + YAFFS_TRACE_MOUNT,
> + "yaffs: restored from checkpoint"
> + );
> + } else {
> +
> + /* Clean up the mess caused by an aborted
> + * checkpoint load then scan backwards.
> + */
> + yaffs_deinit_blocks(dev);
> +
> + yaffs_deinit_tnodes_and_objs(dev);
> +
> + dev->n_erased_blocks = 0;
> + dev->n_free_chunks = 0;
> + dev->alloc_block = -1;
> + dev->alloc_page = -1;
> + dev->n_deleted_files = 0;
> + dev->n_unlinked_files = 0;
> + dev->n_bg_deletions = 0;
> +
> + if (!init_failed && !yaffs_init_blocks(dev))
> + init_failed = 1;
> +
> + yaffs_init_tnodes_and_objs(dev);
> +
> + if (!init_failed
> + && !yaffs_create_initial_dir(dev))
> + init_failed = 1;
> +
> + if (!init_failed && !yaffs2_scan_backwards(dev))
> + init_failed = 1;
> + }
> + } else if (!yaffs1_scan(dev)) {
> + init_failed = 1;
> + }
> +
> + yaffs_strip_deleted_objs(dev);
> + yaffs_fix_hanging_objs(dev);
> + if (dev->param.empty_lost_n_found)
> + yaffs_empty_l_n_f(dev);
> + }
> +
> + if (init_failed) {
> + /* Clean up the mess */
> + yaffs_trace(YAFFS_TRACE_TRACING,
> + "yaffs: yaffs_guts_initialise() aborted.");
> +
> + yaffs_deinitialise(dev);
> + return YAFFS_FAIL;
> + }
> +
> + /* Zero out stats */
> + dev->n_page_reads = 0;
> + dev->n_page_writes = 0;
> + dev->n_erasures = 0;
> + dev->n_gc_copies = 0;
> + dev->n_retired_writes = 0;
> +
> + dev->n_retired_blocks = 0;
> +
> + yaffs_verify_free_chunks(dev);
> + yaffs_verify_blocks(dev);
> +
> + /* Clean up any aborted checkpoint data */
> + if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
> + yaffs2_checkpt_invalidate(dev);
> +
> + yaffs_trace(YAFFS_TRACE_TRACING,
> + "yaffs: yaffs_guts_initialise() done.");
> + return YAFFS_OK;
> +}
> +
> +void yaffs_deinitialise(struct yaffs_dev *dev)
> +{
> + if (dev->is_mounted) {
> + int i;
> +
> + yaffs_deinit_blocks(dev);
> + yaffs_deinit_tnodes_and_objs(dev);
> + if (dev->param.n_caches > 0 && dev->cache) {
> +
> + for (i = 0; i < dev->param.n_caches; i++) {
> + kfree(dev->cache[i].data);
> + dev->cache[i].data = NULL;
> + }
> +
> + kfree(dev->cache);
> + dev->cache = NULL;
> + }
> +
> + kfree(dev->gc_cleanup_list);
> +
> + for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
> + kfree(dev->temp_buffer[i].buffer);
> +
> + dev->is_mounted = 0;
> +
> + if (dev->param.deinitialise_flash_fn)
> + dev->param.deinitialise_flash_fn(dev);

Ah okay, the temporary/cache buffers do actually get freed here. It's
not particularly obvious though. They should also be freed in the error
paths of the allocation functions; that makes it much easier to verify
that there are no leaks.
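
E.g. a small helper called from the failure path (untested sketch,
name made up):

static void yaffs_free_tmp_buffers(struct yaffs_dev *dev)
{
	int i;

	/* kfree(NULL) is a no-op, so this is safe part way through init */
	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		kfree(dev->temp_buffer[i].buffer);
		dev->temp_buffer[i].buffer = NULL;
	}
}

yaffs_deinitialise() could then use the same helper, so the alloc/free
pairing lives in one obvious place.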

> + }
> +}
> +
> +int yaffs_count_free_chunks(struct yaffs_dev *dev)
> +{
> + int n_free = 0;
> + int b;
> + struct yaffs_block_info *blk;
> +
> + blk = dev->block_info;
> + for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
> + switch (blk->block_state) {
> + case YAFFS_BLOCK_STATE_EMPTY:
> + case YAFFS_BLOCK_STATE_ALLOCATING:
> + case YAFFS_BLOCK_STATE_COLLECTING:
> + case YAFFS_BLOCK_STATE_FULL:
> + n_free +=
> + (dev->param.chunks_per_block - blk->pages_in_use +
> + blk->soft_del_pages);
> + break;
> + default:
> + break;
> + }
> + blk++;
> + }
> + return n_free;
> +}
> +
> +int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
> +{
> + /* This is what we report to the outside world */
> + int n_free;
> + int n_dirty_caches;
> + int blocks_for_checkpt;
> + int i;
> +
> + n_free = dev->n_free_chunks;
> + n_free += dev->n_deleted_files;
> +
> + /* Now count and subtract the number of dirty chunks in the cache. */
> +
> + for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
> + if (dev->cache[i].dirty)
> + n_dirty_caches++;
> + }
> +
> + n_free -= n_dirty_caches;
> +
> + n_free -=
> + ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
> +
> + /* Now figure checkpoint space and report that... */
> + blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
> +
> + n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
> +
> + if (n_free < 0)
> + n_free = 0;
> +
> + return n_free;
> +}
> diff --git a/fs/yaffs2/yaffs_guts.h b/fs/yaffs2/yaffs_guts.h
> new file mode 100644
> index 0000000..3a71167
> --- /dev/null
> +++ b/fs/yaffs2/yaffs_guts.h
> @@ -0,0 +1,938 @@
> +/*
> + * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
> + *
> + * Copyright (C) 2002-2011 Aleph One Ltd.
> + * for Toby Churchill Ltd and Brightstar Engineering
> + *
> + * Created by Charles Manning <charles@xxxxxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU Lesser General Public License version 2.1 as
> + * published by the Free Software Foundation.
> + *
> + * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
> + */
> +
> +#ifndef __YAFFS_GUTS_H__
> +#define __YAFFS_GUTS_H__
> +
> +#include "yportenv.h"
> +
> +#define YAFFS_OK 1
> +#define YAFFS_FAIL 0
> +
> +/* Give us a Y=0x59,
> + * Give us an A=0x41,
> + * Give us an FF=0xff
> + * Give us an S=0x53
> + * And what have we got...
> + */
> +#define YAFFS_MAGIC 0x5941ff53
> +
> +#define YAFFS_NTNODES_LEVEL0 16
> +#define YAFFS_TNODES_LEVEL0_BITS 4
> +#define YAFFS_TNODES_LEVEL0_MASK 0xf
> +
> +#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
> +#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
> +#define YAFFS_TNODES_INTERNAL_MASK 0x7
> +#define YAFFS_TNODES_MAX_LEVEL 6
> +
> +#ifndef CONFIG_YAFFS_NO_YAFFS1
> +#define YAFFS_BYTES_PER_SPARE 16
> +#define YAFFS_BYTES_PER_CHUNK 512
> +#define YAFFS_CHUNK_SIZE_SHIFT 9
> +#define YAFFS_CHUNKS_PER_BLOCK 32
> +#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
> +#endif
> +
> +#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
> +#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
> +
> +#define YAFFS_MAX_CHUNK_ID 0x000fffff
> +
> +#define YAFFS_ALLOCATION_NOBJECTS 100
> +#define YAFFS_ALLOCATION_NTNODES 100
> +#define YAFFS_ALLOCATION_NLINKS 100
> +
> +#define YAFFS_NOBJECT_BUCKETS 256
> +
> +#define YAFFS_OBJECT_SPACE 0x40000
> +#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1)
> +
> +#define YAFFS_CHECKPOINT_VERSION 4
> +
> +#ifdef CONFIG_YAFFS_UNICODE
> +#define YAFFS_MAX_NAME_LENGTH 127
> +#define YAFFS_MAX_ALIAS_LENGTH 79
> +#else
> +#define YAFFS_MAX_NAME_LENGTH 255
> +#define YAFFS_MAX_ALIAS_LENGTH 159
> +#endif
> +
> +#define YAFFS_SHORT_NAME_LENGTH 15
> +
> +/* Some special object ids for pseudo objects */
> +#define YAFFS_OBJECTID_ROOT 1
> +#define YAFFS_OBJECTID_LOSTNFOUND 2
> +#define YAFFS_OBJECTID_UNLINKED 3
> +#define YAFFS_OBJECTID_DELETED 4
> +
> +/* Pseudo object ids for checkpointing */
> +#define YAFFS_OBJECTID_SB_HEADER 0x10
> +#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
> +#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
> +
> +#define YAFFS_MAX_SHORT_OP_CACHES 20
> +
> +#define YAFFS_N_TEMP_BUFFERS 6
> +
> +/* We limit the number attempts at sucessfully saving a chunk of data.
> + * Small-page devices have 32 pages per block; large-page devices have 64.
> + * Default to something in the order of 5 to 10 blocks worth of chunks.
> + */
> +#define YAFFS_WR_ATTEMPTS (5*64)
> +
> +/* Sequence numbers are used in YAFFS2 to determine block allocation order.
> + * The range is limited slightly to help distinguish bad numbers from good.
> + * This also allows us to perhaps in the future use special numbers for
> + * special purposes.
> + * EFFFFF00 allows the allocation of 8 blocks/second (~1Mbytes) for 15 years,
> + * and is a larger number than the lifetime of a 2GB device.
> + */
> +#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
> +#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00
> +
> +/* Special sequence number for bad block that failed to be marked bad */
> +#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000
> +
> +/* ChunkCache is used for short read/write operations.*/
> +struct yaffs_cache {
> + struct yaffs_obj *object;
> + int chunk_id;
> + int last_use;
> + int dirty;
> + int n_bytes; /* Only valid if the cache is dirty */
> + int locked; /* Can't push out or flush while locked. */
> + u8 *data;
> +};
> +
> +/* Tags structures in RAM
> + * NB This uses bitfield. Bitfields should not straddle a u32 boundary
> + * otherwise the structure size will get blown out.
> + */
> +
> +#ifndef CONFIG_YAFFS_NO_YAFFS1
> +struct yaffs_tags {
> + unsigned chunk_id:20;
> + unsigned serial_number:2;
> + unsigned n_bytes_lsb:10;
> + unsigned obj_id:18;
> + unsigned ecc:12;
> + unsigned n_bytes_msb:2;
> +};
> +
> +union yaffs_tags_union {
> + struct yaffs_tags as_tags;
> + u8 as_bytes[8];
> +};
> +
> +#endif
> +
> +/* Stuff used for extended tags in YAFFS2 */
> +
> +enum yaffs_ecc_result {
> + YAFFS_ECC_RESULT_UNKNOWN,
> + YAFFS_ECC_RESULT_NO_ERROR,
> + YAFFS_ECC_RESULT_FIXED,
> + YAFFS_ECC_RESULT_UNFIXED
> +};
> +
> +enum yaffs_obj_type {
> + YAFFS_OBJECT_TYPE_UNKNOWN,
> + YAFFS_OBJECT_TYPE_FILE,
> + YAFFS_OBJECT_TYPE_SYMLINK,
> + YAFFS_OBJECT_TYPE_DIRECTORY,
> + YAFFS_OBJECT_TYPE_HARDLINK,
> + YAFFS_OBJECT_TYPE_SPECIAL
> +};
> +
> +#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
> +
> +struct yaffs_ext_tags {
> +
> + unsigned validity0;
> + unsigned chunk_used; /* Status of the chunk: used or unused */
> + unsigned obj_id; /* If 0 this is not used */
> + unsigned chunk_id; /* If 0 this is a header, else a data chunk */
> + unsigned n_bytes; /* Only valid for data chunks */
> +
> + /* The following stuff only has meaning when we read */
> + enum yaffs_ecc_result ecc_result;
> + unsigned block_bad;
> +
> + /* YAFFS 1 stuff */
> + unsigned is_deleted; /* The chunk is marked deleted */
> + unsigned serial_number; /* Yaffs1 2-bit serial number */
> +
> + /* YAFFS2 stuff */
> + unsigned seq_number; /* The sequence number of this block */
> +
> + /* Extra info if this is an object header (YAFFS2 only) */
> +
> + unsigned extra_available; /* Extra info available if not zero */
> + unsigned extra_parent_id; /* The parent object */
> + unsigned extra_is_shrink; /* Is it a shrink header? */
> + unsigned extra_shadows; /* Does this shadow another object? */
> +
> + enum yaffs_obj_type extra_obj_type; /* What object type? */
> +
> + unsigned extra_length; /* Length if it is a file */
> + unsigned extra_equiv_id; /* Equivalent object for a hard link */
> +
> + unsigned validity1;
> +
> +};
> +
> +/* Spare structure for YAFFS1 */
> +struct yaffs_spare {
> + u8 tb0;
> + u8 tb1;
> + u8 tb2;
> + u8 tb3;
> + u8 page_status; /* set to 0 to delete the chunk */
> + u8 block_status;
> + u8 tb4;
> + u8 tb5;
> + u8 ecc1[3];
> + u8 tb6;
> + u8 tb7;
> + u8 ecc2[3];
> +};
> +
> +/*Special structure for passing through to mtd */
> +struct yaffs_nand_spare {
> + struct yaffs_spare spare;
> + int eccres1;
> + int eccres2;
> +};
> +
> +/* Block data in RAM */
> +
> +enum yaffs_block_state {
> + YAFFS_BLOCK_STATE_UNKNOWN = 0,
> +
> + YAFFS_BLOCK_STATE_SCANNING,
> + /* Being scanned */
> +
> + YAFFS_BLOCK_STATE_NEEDS_SCAN,
> + /* The block might have something on it (ie it is allocating or full,
> + * perhaps empty) but it needs to be scanned to determine its true
> + * state.
> + * This state is only valid during scanning.
> + * NB We tolerate empty because the pre-scanner might be incapable of
> + * deciding
> + * However, if this state is returned on a YAFFS2 device,
> + * then we expect a sequence number
> + */
> +
> + YAFFS_BLOCK_STATE_EMPTY,
> + /* This block is empty */
> +
> + YAFFS_BLOCK_STATE_ALLOCATING,
> + /* This block is partially allocated.
> + * At least one page holds valid data.
> + * This is the one currently being used for page
> + * allocation. Should never be more than one of these.
> + * If a block is only partially allocated at mount it is treated as
> + * full.
> + */
> +
> + YAFFS_BLOCK_STATE_FULL,
> + /* All the pages in this block have been allocated.
> + * If a block was only partially allocated when mounted we treat
> + * it as fully allocated.
> + */
> +
> + YAFFS_BLOCK_STATE_DIRTY,
> + /* The block was full and now all chunks have been deleted.
> + * Erase me, reuse me.
> + */
> +
> + YAFFS_BLOCK_STATE_CHECKPOINT,
> + /* This block is assigned to holding checkpoint data. */
> +
> + YAFFS_BLOCK_STATE_COLLECTING,
> + /* This block is being garbage collected */
> +
> + YAFFS_BLOCK_STATE_DEAD
> + /* This block has failed and is not in use */
> +};
> +
> +#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
> +
> +struct yaffs_block_info {
> +
> + int soft_del_pages:10; /* number of soft deleted pages */
> + int pages_in_use:10; /* number of pages in use */
> + unsigned block_state:4; /* One of the above block states. */
> + /* NB use unsigned because enum is sometimes
> + * an int */
> + u32 needs_retiring:1; /* Data has failed on this block, */
> + /*need to get valid data off and retire*/
> + u32 skip_erased_check:1;/* Skip the erased check on this block */
> + u32 gc_prioritise:1; /* An ECC check or blank check has failed.
> + Block should be prioritised for GC */
> + u32 chunk_error_strikes:3; /* How many times we've had ecc etc
> + failures on this block and tried to reuse it */
> +
> +#ifdef CONFIG_YAFFS_YAFFS2
> + u32 has_shrink_hdr:1; /* This block has at least one shrink header */
> + u32 seq_number; /* block sequence number for yaffs2 */
> +#endif
> +
> +};
> +
> +/* -------------------------- Object structure -------------------------------*/
> +/* This is the object structure as stored on NAND */
> +
> +struct yaffs_obj_hdr {
> + enum yaffs_obj_type type;
> +
> + /* Apply to everything */
> + int parent_obj_id;
> + u16 sum_no_longer_used; /* checksum of name. No longer used */
> + YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
> +
> + /* The following apply to all object types except for hard links */
> + u32 yst_mode; /* protection */
> +
> + u32 yst_uid;
> + u32 yst_gid;
> + u32 yst_atime;
> + u32 yst_mtime;
> + u32 yst_ctime;
> +
> + /* File size applies to files only */
> + int file_size;
> +
> + /* Equivalent object id applies to hard links only. */
> + int equiv_id;
> +
> + /* Alias is for symlinks only. */
> + YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
> +
> + u32 yst_rdev; /* stuff for block and char devices (major/min) */
> +
> + u32 win_ctime[2];
> + u32 win_atime[2];
> + u32 win_mtime[2];
> +
> + u32 inband_shadowed_obj_id;
> + u32 inband_is_shrink;
> +
> + u32 reserved[2];
> + int shadows_obj; /* This object header shadows the
> + specified object if > 0 */
> +
> + /* is_shrink applies to object headers written when wemake a hole. */
> + u32 is_shrink;
> +
> +};
> +
> +/*--------------------------- Tnode -------------------------- */
> +
> +struct yaffs_tnode {
> + struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
> +};
> +
> +/*------------------------ Object -----------------------------*/
> +/* An object can be one of:
> + * - a directory (no data, has children links
> + * - a regular file (data.... not prunes :->).
> + * - a symlink [symbolic link] (the alias).
> + * - a hard link
> + */
> +
> +struct yaffs_file_var {
> + u32 file_size;
> + u32 scanned_size;
> + u32 shrink_size;
> + int top_level;
> + struct yaffs_tnode *top;
> +};
> +
> +struct yaffs_dir_var {
> + struct list_head children; /* list of child links */
> + struct list_head dirty; /* Entry for list of dirty directories */
> +};
> +
> +struct yaffs_symlink_var {
> + YCHAR *alias;
> +};
> +
> +struct yaffs_hardlink_var {
> + struct yaffs_obj *equiv_obj;
> + u32 equiv_id;
> +};
> +
> +union yaffs_obj_var {
> + struct yaffs_file_var file_variant;
> + struct yaffs_dir_var dir_variant;
> + struct yaffs_symlink_var symlink_variant;
> + struct yaffs_hardlink_var hardlink_variant;
> +};
> +
> +struct yaffs_obj {
> + u8 deleted:1; /* This should only apply to unlinked files. */
> + u8 soft_del:1; /* it has also been soft deleted */
> + u8 unlinked:1; /* An unlinked file.*/
> + u8 fake:1; /* A fake object has no presence on NAND. */
> + u8 rename_allowed:1; /* Some objects cannot be renamed. */
> + u8 unlink_allowed:1;
> + u8 dirty:1; /* the object needs to be written to flash */
> + u8 valid:1; /* When the file system is being loaded up, this
> + * object might be created before the data
> + * is available
> + * ie. file data chunks encountered before
> + * the header.
> + */
> + u8 lazy_loaded:1; /* This object has been lazy loaded and
> + * is missing some detail */
> +
> + u8 defered_free:1; /* Object is removed from NAND, but is
> + * still in the inode cache.
> + * Free of object is defered.
> + * until the inode is released.
> + */
> + u8 being_created:1; /* This object is still being created
> + * so skip some verification checks. */
> + u8 is_shadowed:1; /* This object is shadowed on the way
> + * to being renamed. */
> +
> + u8 xattr_known:1; /* We know if this has object has xattribs
> + * or not. */
> + u8 has_xattr:1; /* This object has xattribs.
> + * Only valid if xattr_known. */
> +
> + u8 serial; /* serial number of chunk in NAND.*/
> + u16 sum; /* sum of the name to speed searching */
> +
> + struct yaffs_dev *my_dev; /* The device I'm on */
> +
> + struct list_head hash_link; /* list of objects in hash bucket */
> +
> + struct list_head hard_links; /* hard linked object chain*/
> +
> + /* directory structure stuff */
> + /* also used for linking up the free list */
> + struct yaffs_obj *parent;
> + struct list_head siblings;
> +
> + /* Where's my object header in NAND? */
> + int hdr_chunk;
> +
> + int n_data_chunks; /* Number of data chunks for this file. */
> +
> + u32 obj_id; /* the object id value */
> +
> + u32 yst_mode;
> +
> +#ifndef CONFIG_YAFFS_NO_SHORT_NAMES

Doesn't exist.

> + YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
> +#endif
> +
> +#ifdef CONFIG_YAFFS_WINCE

Nope, this one doesn't exist either.

> + u32 win_ctime[2];
> + u32 win_mtime[2];
> + u32 win_atime[2];
> +#else
> + u32 yst_uid;
> + u32 yst_gid;
> + u32 yst_atime;
> + u32 yst_mtime;
> + u32 yst_ctime;
> +#endif
> +
> + u32 yst_rdev;
> +
> + void *my_inode;
> +
> + enum yaffs_obj_type variant_type;
> +
> + union yaffs_obj_var variant;
> +
> +};
> +
> +struct yaffs_obj_bucket {
> + struct list_head list;
> + int count;
> +};
> +
> +/* yaffs_checkpt_obj holds the definition of an object as dumped
> + * by checkpointing.
> + */
> +
> +struct yaffs_checkpt_obj {
> + int struct_type;
> + u32 obj_id;
> + u32 parent_id;
> + int hdr_chunk;
> + enum yaffs_obj_type variant_type:3;
> + u8 deleted:1;
> + u8 soft_del:1;
> + u8 unlinked:1;
> + u8 fake:1;
> + u8 rename_allowed:1;
> + u8 unlink_allowed:1;
> + u8 serial;
> + int n_data_chunks;
> + u32 size_or_equiv_obj;
> +};
> +
> +/*--------------------- Temporary buffers ----------------
> + *
> + * These are chunk-sized working buffers. Each device has a few
> + */
> +
> +struct yaffs_buffer {
> + u8 *buffer;
> + int line; /* track from whence this buffer was allocated */
> + int max_line;
> +};
> +
> +/*----------------- Device ---------------------------------*/
> +
> +struct yaffs_param {
> + const YCHAR *name;
> +
> + /*
> + * Entry parameters set up way early. Yaffs sets up the rest.
> + * The structure should be zeroed out before use so that unused
> + * and defualt values are zero.
> + */
> +
> + int inband_tags; /* Use unband tags */
> + u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
> + be a power of 2 */
> + int chunks_per_block; /* does not need to be a power of 2 */
> + int spare_bytes_per_chunk; /* spare area size */
> + int start_block; /* Start block we're allowed to use */
> + int end_block; /* End block we're allowed to use */
> + int n_reserved_blocks; /* Tuneable so that we can reduce
> + * reserved blocks on NOR and RAM. */
> +
> + int n_caches; /* If <= 0, then short op caching is disabled,
> + * else the number of short op caches.
> + */
> + int use_nand_ecc; /* Flag to decide whether or not to use
> + * NAND driver ECC on data (yaffs1) */
> + int no_tags_ecc; /* Flag to decide whether or not to do ECC
> + * on packed tags (yaffs2) */
> +
> + int is_yaffs2; /* Use yaffs2 mode on this device */
> +
> + int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
> +
> + int refresh_period; /* How often to check for a block refresh */
> +
> + /* Checkpoint control. Can be set before or after initialisation */
> + u8 skip_checkpt_rd;
> + u8 skip_checkpt_wr;
> +
> + int enable_xattr; /* Enable xattribs */
> +
> + /* NAND access functions (Must be set before calling YAFFS) */
> +
> + int (*write_chunk_fn) (struct yaffs_dev *dev,
> + int nand_chunk, const u8 *data,
> + const struct yaffs_spare *spare);
> + int (*read_chunk_fn) (struct yaffs_dev *dev,
> + int nand_chunk, u8 *data,
> + struct yaffs_spare *spare);
> + int (*erase_fn) (struct yaffs_dev *dev, int flash_block);
> + int (*initialise_flash_fn) (struct yaffs_dev *dev);
> + int (*deinitialise_flash_fn) (struct yaffs_dev *dev);
> +
> +#ifdef CONFIG_YAFFS_YAFFS2
> + int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
> + int nand_chunk, const u8 *data,
> + const struct yaffs_ext_tags *tags);
> + int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
> + int nand_chunk, u8 *data,
> + struct yaffs_ext_tags *tags);
> + int (*bad_block_fn) (struct yaffs_dev *dev, int block_no);
> + int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
> + enum yaffs_block_state *state,
> + u32 *seq_number);
> +#endif
> +
> + /* The remove_obj_fn function must be supplied by OS flavours that
> + * need it.
> + * yaffs direct uses it to implement the faster readdir.
> + * Linux uses it to protect the directory during unlocking.
> + */
> + void (*remove_obj_fn) (struct yaffs_obj *obj);
> +
> + /* Callback to mark the superblock dirty */
> + void (*sb_dirty_fn) (struct yaffs_dev *dev);
> +
> + /* Callback to control garbage collection. */
> + unsigned (*gc_control) (struct yaffs_dev *dev);
> +
> + /* Debug control flags. Don't use unless you know what you're doing */
> + int use_header_file_size; /* Flag to determine if we should use
> + * file sizes from the header */
> + int disable_lazy_load; /* Disable lazy loading on this device */
> + int wide_tnodes_disabled; /* Set to disable wide tnodes */
> + int disable_soft_del; /* yaffs 1 only: Set to disable the use of
> + * softdeletion. */
> +
> + int defered_dir_update; /* Set to defer directory updates */
> +
> +#ifdef CONFIG_YAFFS_AUTO_UNICODE

Doesn't exist.

> + int auto_unicode;
> +#endif
> + int always_check_erased; /* Force chunk erased check always on */
> +};
> +
> +struct yaffs_dev {
> + struct yaffs_param param;
> +
> + /* Context storage. Holds extra OS specific data for this device */
> +
> + void *os_context;
> + void *driver_context;
> +
> + struct list_head dev_list;
> +
> + /* Runtime parameters. Set up by YAFFS. */
> + int data_bytes_per_chunk;
> +
> + /* Non-wide tnode stuff */
> + u16 chunk_grp_bits; /* Number of bits that need to be resolved if
> + * the tnodes are not wide enough.
> + */
> + u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
> +
> + /* Stuff to support wide tnodes */
> + u32 tnode_width;
> + u32 tnode_mask;
> + u32 tnode_size;
> +
> + /* Stuff for figuring out file offset to chunk conversions */
> + u32 chunk_shift; /* Shift value */
> + u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */
> + u32 chunk_mask; /* Mask to use for power-of-2 case */
> +
> + int is_mounted;
> + int read_only;
> + int is_checkpointed;
> +
> + /* Stuff to support block offsetting to support start block zero */
> + int internal_start_block;
> + int internal_end_block;
> + int block_offset;
> + int chunk_offset;
> +
> + /* Runtime checkpointing stuff */
> + int checkpt_page_seq; /* running sequence number of checkpt pages */
> + int checkpt_byte_count;
> + int checkpt_byte_offs;
> + u8 *checkpt_buffer;
> + int checkpt_open_write;
> + int blocks_in_checkpt;
> + int checkpt_cur_chunk;
> + int checkpt_cur_block;
> + int checkpt_next_block;
> + int *checkpt_block_list;
> + int checkpt_max_blocks;
> + u32 checkpt_sum;
> + u32 checkpt_xor;
> +
> + int checkpoint_blocks_required; /* Number of blocks needed to store
> + * current checkpoint set */
> +
> + /* Block Info */
> + struct yaffs_block_info *block_info;
> + u8 *chunk_bits; /* bitmap of chunks in use */
> + unsigned block_info_alt:1; /* allocated using alternative alloc */
> + unsigned chunk_bits_alt:1; /* allocated using alternative alloc */
> + int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
> + * Must be consistent with chunks_per_block.
> + */
> +
> + int n_erased_blocks;
> + int alloc_block; /* Current block being allocated off */
> + u32 alloc_page;
> + int alloc_block_finder; /* Used to search for next allocation block */
> +
> + /* Object and Tnode memory management */
> + void *allocator;
> + int n_obj;
> + int n_tnodes;
> +
> + int n_hardlinks;
> +
> + struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
> + u32 bucket_finder;
> +
> + int n_free_chunks;
> +
> + /* Garbage collection control */
> + u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
> + u32 n_clean_ups;
> +
> + unsigned has_pending_prioritised_gc; /* We think this device might
> + have pending prioritised gcs */
> + unsigned gc_disable;
> + unsigned gc_block_finder;
> + unsigned gc_dirtiest;
> + unsigned gc_pages_in_use;
> + unsigned gc_not_done;
> + unsigned gc_block;
> + unsigned gc_chunk;
> + unsigned gc_skip;
> +
> + /* Special directories */
> + struct yaffs_obj *root_dir;
> + struct yaffs_obj *lost_n_found;
> +
> + int buffered_block; /* Which block is buffered here? */
> + int doing_buffered_block_rewrite;
> +
> + struct yaffs_cache *cache;
> + int cache_last_use;
> +
> + /* Stuff for background deletion and unlinked files. */
> + struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
> + files live. */
> + struct yaffs_obj *del_dir; /* Directory where deleted objects are
> + sent to disappear. */
> + struct yaffs_obj *unlinked_deletion; /* Current file being
> + background deleted. */
> + int n_deleted_files; /* Count of files awaiting deletion; */
> + int n_unlinked_files; /* Count of unlinked files. */
> + int n_bg_deletions; /* Count of background deletions. */
> +
> + /* Temporary buffer management */
> + struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
> + int max_temp;
> + int temp_in_use;
> + int unmanaged_buffer_allocs;
> + int unmanaged_buffer_deallocs;
> +
> + /* yaffs2 runtime stuff */
> + unsigned seq_number; /* Sequence number of currently
> + allocating block */
> + unsigned oldest_dirty_seq;
> + unsigned oldest_dirty_block;
> +
> + /* Block refreshing */
> + int refresh_skip; /* A skip down counter.
> + * Refresh happens when this gets to zero. */
> +
> + /* Dirty directory handling */
> + struct list_head dirty_dirs; /* List of dirty directories */
> +
> + /* Statistcs */
> + u32 n_page_writes;
> + u32 n_page_reads;
> + u32 n_erasures;
> + u32 n_erase_failures;
> + u32 n_gc_copies;
> + u32 all_gcs;
> + u32 passive_gc_count;
> + u32 oldest_dirty_gc_count;
> + u32 n_gc_blocks;
> + u32 bg_gcs;
> + u32 n_retired_writes;
> + u32 n_retired_blocks;
> + u32 n_ecc_fixed;
> + u32 n_ecc_unfixed;
> + u32 n_tags_ecc_fixed;
> + u32 n_tags_ecc_unfixed;
> + u32 n_deletions;
> + u32 n_unmarked_deletions;
> + u32 refresh_count;
> + u32 cache_hits;
> +
> +};
> +
> +/* The CheckpointDevice structure holds the device information that changes
> + *at runtime and must be preserved over unmount/mount cycles.
> + */
> +struct yaffs_checkpt_dev {
> + int struct_type;
> + int n_erased_blocks;
> + int alloc_block; /* Current block being allocated off */
> + u32 alloc_page;
> + int n_free_chunks;
> +
> + int n_deleted_files; /* Count of files awaiting deletion; */
> + int n_unlinked_files; /* Count of unlinked files. */
> + int n_bg_deletions; /* Count of background deletions. */
> +
> + /* yaffs2 runtime stuff */
> + unsigned seq_number; /* Sequence number of currently
> + * allocating block */
> +
> +};
> +
> +struct yaffs_checkpt_validity {
> + int struct_type;
> + u32 magic;
> + u32 version;
> + u32 head;
> +};
> +
> +struct yaffs_shadow_fixer {
> + int obj_id;
> + int shadowed_id;
> + struct yaffs_shadow_fixer *next;
> +};
> +
> +/* Structure for doing xattr modifications */
> +struct yaffs_xattr_mod {
> + int set; /* If 0 then this is a deletion */
> + const YCHAR *name;
> + const void *data;
> + int size;
> + int flags;
> + int result;
> +};
> +
> +/*----------------------- YAFFS Functions -----------------------*/
> +
> +int yaffs_guts_initialise(struct yaffs_dev *dev);
> +void yaffs_deinitialise(struct yaffs_dev *dev);
> +
> +int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
> +
> +int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
> + struct yaffs_obj *new_dir, const YCHAR * new_name);
> +
> +int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
> +int yaffs_del_obj(struct yaffs_obj *obj);
> +
> +int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
> +int yaffs_get_obj_length(struct yaffs_obj *obj);
> +int yaffs_get_obj_inode(struct yaffs_obj *obj);
> +unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
> +int yaffs_get_obj_link_count(struct yaffs_obj *obj);
> +
> +/* File operations */
> +int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
> + int n_bytes);
> +int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
> + int n_bytes, int write_trhrough);
> +int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
> +
> +struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
> + const YCHAR *name, u32 mode, u32 uid,
> + u32 gid);
> +
> +int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
> +
> +/* Flushing and checkpointing */
> +void yaffs_flush_whole_cache(struct yaffs_dev *dev);
> +
> +int yaffs_checkpoint_save(struct yaffs_dev *dev);
> +int yaffs_checkpoint_restore(struct yaffs_dev *dev);
> +
> +/* Directory operations */
> +struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
> + u32 mode, u32 uid, u32 gid);
> +struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
> + const YCHAR *name);
> +struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
> +
> +/* Link operations */
> +struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
> + struct yaffs_obj *equiv_obj);
> +
> +struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
> +
> +/* Symlink operations */
> +struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
> + const YCHAR *name, u32 mode, u32 uid,
> + u32 gid, const YCHAR *alias);
> +YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
> +
> +/* Special inodes (fifos, sockets and devices) */
> +struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
> + const YCHAR *name, u32 mode, u32 uid,
> + u32 gid, u32 rdev);
> +
> +int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
> + const void *value, int size, int flags);
> +int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
> + int size);
> +int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
> +int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
> +
> +/* Special directories */
> +struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
> +struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
> +
> +void yaffs_handle_defered_free(struct yaffs_obj *obj);
> +
> +void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
> +
> +int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
> +
> +/* Debug dump */
> +int yaffs_dump_obj(struct yaffs_obj *obj);
> +
> +void yaffs_guts_test(struct yaffs_dev *dev);
> +
> +/* A few useful functions to be used within the core files*/
> +void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
> + int lyn);
> +int yaffs_check_ff(u8 *buffer, int n_bytes);
> +void yaffs_handle_chunk_error(struct yaffs_dev *dev,
> + struct yaffs_block_info *bi);
> +
> +u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev, int line_no);
> +void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer, int line_no);
> +
> +struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
> + int number,
> + enum yaffs_obj_type type);
> +int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
> + int nand_chunk, int in_scan);
> +void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
> +void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
> + const struct yaffs_obj_hdr *oh);
> +void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
> +YCHAR *yaffs_clone_str(const YCHAR *str);
> +void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
> +void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
> +int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
> + int force, int is_shrink, int shadows,
> + struct yaffs_xattr_mod *xop);
> +void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
> + int backward_scanning);
> +int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
> +struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
> +struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
> + struct yaffs_file_var *file_struct,
> + u32 chunk_id,
> + struct yaffs_tnode *passed_tn);
> +
> +int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
> + int n_bytes, int write_trhrough);
> +void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
> +void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
> +
> +int yaffs_count_free_chunks(struct yaffs_dev *dev);
> +
> +struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
> + struct yaffs_file_var *file_struct,
> + u32 chunk_id);
> +
> +u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
> + unsigned pos);
> +
> +int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
> +#endif


--
Bluewater Systems Ltd - ARM Technology Solution Centre

Ryan Mallon 5 Amuri Park, 404 Barbadoes St
ryan@xxxxxxxxxxxxxxxx PO Box 13 889, Christchurch 8013
http://www.bluewatersys.com New Zealand
Phone: +64 3 3779127 Freecall: Australia 1800 148 751
Fax: +64 3 3779135 USA 1800 261 2934