On Thu, 20 Jul 2000, Linus Torvalds wrote:
> I wasn't serious.
>
> It was a comment to your "if you have a million inodes it's a lot of bits"
> comment.
>
> Considering that the inode is probably 200+ bytes, the million inodes case
> is fairly scary. And the 4 bytes won't be that noticeable..
>
Around 416 bytes [just cat /proc/slabinfo; the third number is the object
size, rounded up to the cache line size].
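For example, the inode_cache line looks roughly like this on a 2.4 box (the
object counts below are made up, only the layout matters; the third column
is the per-object size in bytes):

	$ grep inode_cache /proc/slabinfo
	inode_cache      42000  42000    416  ...

So a million cached inodes already cost on the order of 400MB, next to which
the extra 4 bytes (about 4MB total) really are noise.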
> You'd save more by making the quota stuff go away when quotas aren't
> enabled..
>
Or by removing a superfluous variable in each buffer head: the kiobuf code
adds its own callback parameter (b_kiobuf) instead of using the generic one
(b_dev_id, renamed to b_private below).
Someone promised several times to merge my patch ;)
--
Manfred
<<<<<<<<<<<< untested patch, I don't have a RAID or kiobuf test setup
// $Header$
// Kernel Version:
// VERSION = 2
// PATCHLEVEL = 4
// SUBLEVEL = 0
// EXTRAVERSION = -test5
--- 2.4/drivers/block/raid1.c Thu Jul 20 20:21:51 2000
+++ build-2.4/drivers/block/raid1.c Thu Jul 20 20:18:20 2000
@@ -379,7 +379,7 @@
}
void raid1_end_request (struct buffer_head *bh, int uptodate)
{
- struct raid1_bh * r1_bh = (struct raid1_bh *)(bh->b_dev_id);
+ struct raid1_bh * r1_bh = (struct raid1_bh *)(bh->b_private);
/*
* this branch is our 'one mirror IO has finished' event handler:
@@ -626,7 +626,7 @@
bh_req->b_rdev = mirror->dev;
/* bh_req->b_rsector = bh->n_rsector; */
bh_req->b_end_io = raid1_end_request;
- bh_req->b_dev_id = r1_bh;
+ bh_req->b_private = r1_bh;
q = blk_get_queue(bh_req->b_rdev);
generic_make_request (q, rw, bh_req);
return 0;
@@ -679,7 +679,7 @@
mbh->b_data = bh->b_data;
mbh->b_list = BUF_LOCKED;
mbh->b_end_io = raid1_end_request;
- mbh->b_dev_id = r1_bh;
+ mbh->b_private = r1_bh;
mbh->b_next = r1_bh->mirror_bh_list;
r1_bh->mirror_bh_list = mbh;
@@ -1192,7 +1192,7 @@
mbh->b_data = bh->b_data;
mbh->b_list = BUF_LOCKED;
mbh->b_end_io = end_sync_write;
- mbh->b_dev_id = r1_bh;
+ mbh->b_private = r1_bh;
mbh->b_next = r1_bh->mirror_bh_list;
r1_bh->mirror_bh_list = mbh;
@@ -1430,7 +1430,7 @@
if (bh->b_data != (char *) page_address(bh->b_page))
BUG();
bh->b_end_io = end_sync_read;
- bh->b_dev_id = (void *) r1_bh;
+ bh->b_private = r1_bh;
bh->b_rsector = block_nr<<1;
init_waitqueue_head(&bh->b_wait);
@@ -1448,7 +1448,7 @@
static void end_sync_read(struct buffer_head *bh, int uptodate)
{
- struct raid1_bh * r1_bh = (struct raid1_bh *)(bh->b_dev_id);
+ struct raid1_bh * r1_bh = (struct raid1_bh *)(bh->b_private);
/* we have read a block, now it needs to be re-written,
* or re-read if the read failed.
@@ -1463,7 +1463,7 @@
static void end_sync_write(struct buffer_head *bh, int uptodate)
{
- struct raid1_bh * r1_bh = (struct raid1_bh *)(bh->b_dev_id);
+ struct raid1_bh * r1_bh = (struct raid1_bh *)(bh->b_private);
if (!uptodate)
md_error (mddev_to_kdev(r1_bh->mddev), bh->b_dev);
--- 2.4/drivers/block/raid5.c Thu Jul 20 20:21:51 2000
+++ build-2.4/drivers/block/raid5.c Thu Jul 20 20:18:20 2000
@@ -597,7 +597,7 @@
static void raid5_end_request (struct buffer_head * bh, int uptodate)
{
- struct stripe_head *sh = bh->b_dev_id;
+ struct stripe_head *sh = bh->b_private;
raid5_conf_t *conf = sh->raid_conf;
int disks = conf->raid_disks, i;
unsigned long flags;
--- 2.4/fs/buffer.c Thu Jul 20 20:21:59 2000
+++ build-2.4/fs/buffer.c Thu Jul 20 20:18:20 2000
@@ -708,11 +708,11 @@
}
}
-void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *dev_id)
+void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
bh->b_list = BUF_CLEAN;
bh->b_end_io = handler;
- bh->b_dev_id = dev_id;
+ bh->b_private = private;
}
static void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
@@ -1742,7 +1742,7 @@
mark_buffer_uptodate(bh, uptodate);
- kiobuf = bh->b_kiobuf;
+ kiobuf = bh->b_private;
unlock_buffer(bh);
end_kio_request(kiobuf, uptodate);
}
@@ -1862,11 +1862,10 @@
set_bh_page(tmp, map, offset);
tmp->b_this_page = tmp;
- init_buffer(tmp, end_buffer_io_kiobuf, NULL);
+ init_buffer(tmp, end_buffer_io_kiobuf, iobuf);
tmp->b_dev = dev;
tmp->b_blocknr = blocknr;
tmp->b_state = 1 << BH_Mapped;
- tmp->b_kiobuf = iobuf;
if (rw == WRITE) {
set_bit(BH_Uptodate, &tmp->b_state);
--- 2.4/include/linux/fs.h Thu Jul 20 20:22:03 2000
+++ build-2.4/include/linux/fs.h Thu Jul 20 20:19:18 2000
@@ -238,11 +238,10 @@
char * b_data; /* pointer to data block (512 byte) */
struct page *b_page; /* the page this bh is mapped to */
void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */
- void *b_dev_id;
+ void *b_private; /* reserved for b_end_io */
unsigned long b_rsector; /* Real buffer location on disk */
wait_queue_head_t b_wait;
- struct kiobuf * b_kiobuf; /* kiobuf which owns this IO */
};
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
--- 2.4/mm/highmem.c Mon Jun 12 10:07:59 2000
+++ build-2.4/mm/highmem.c Thu Jul 20 20:18:20 2000
@@ -281,7 +281,7 @@
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
- struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_dev_id);
+ struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
bh_orig->b_end_io(bh_orig, uptodate);
__free_page(bh->b_page);
@@ -295,7 +295,7 @@
static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
{
- struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_dev_id);
+ struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
if (uptodate)
copy_to_high_bh_irq(bh_orig, bh);
@@ -354,10 +354,9 @@
copy_from_high_bh(bh, bh_orig);
} else
bh->b_end_io = bounce_end_io_read;
- bh->b_dev_id = (void *)bh_orig;
+ bh->b_private = (void *)bh_orig;
bh->b_rsector = bh_orig->b_rsector;
memset(&bh->b_wait, -1, sizeof(bh->b_wait));
- bh->b_kiobuf = NULL;
return bh;
}
>>>>>>>>>>>>
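For anyone who hasn't followed the kiobuf discussion, the pattern the patch
converges on is just the usual completion cookie. A minimal sketch (the my_*
names are made up, not from the tree):

struct my_ctx {
	int pending;	/* whatever per-request state a driver keeps */
};

static void my_end_io(struct buffer_head *bh, int uptodate)
{
	/* recover the per-request context from the generic field */
	struct my_ctx *ctx = (struct my_ctx *) bh->b_private;

	if (!uptodate)
		printk("my_driver: I/O error\n");
	/* ... finish the higher-level request via ctx ... */
}

static void my_submit(struct buffer_head *bh, struct my_ctx *ctx)
{
	bh->b_end_io = my_end_io;
	bh->b_private = ctx;	/* one generic field, no b_kiobuf needed */
	generic_make_request(blk_get_queue(bh->b_rdev), READ, bh);
}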