#define MODULE
#define __KERNEL__
#define __NO_VERSION__

#include <linux/module.h>    /* Needed by all modules */
#include <linux/kernel.h>    /* Needed for KERN_ALERT / printk */
#include <linux/sched.h>     /* current, schedule(), SCHED_YIELD */
#include <linux/slab.h>      /* kmalloc()/kfree() */
#include <linux/fs.h>        /* struct buffer_head, kdev_t */
#include <linux/blkdev.h>    /* blk_dev[], request_queue_t, io_request_lock */
#include <linux/iobuf.h>     /* kiobuf helpers, brw_kiovec() */

/*
 * io_request_lock is exported by the block layer (ll_rw_blk.c) and declared in
 * <linux/blkdev.h>; defining a private copy here would shadow the lock the
 * drivers actually take, so the kernel's own lock is used instead.
 */

/* Serializes the mirrored writes issued through brw_kiovec(), which can sleep,
 * so a semaphore is used rather than a spinlock. */
static DECLARE_MUTEX(dev_lock);

typedef void (*b_end_io1)(struct buffer_head *bh, int uptodate); /* I/O completion */

/* Saved per-request state: the original buffer head and its completion callback. */
typedef struct {
	struct buffer_head *bh;
	void (*b_end_io)(struct buffer_head *bh, int uptodate); /* original I/O completion */
} my_b_end_io, *pmy_b_end_io;

/* Replacement completion handler: restore the original b_end_io/b_private,
 * invoke the original handler, then free the saved state.  This can run in
 * interrupt context, so nothing here may sleep or take io_request_lock. */
static void kti_b_end_io(struct buffer_head *bh, int uptodate)
{
	pmy_b_end_io private = bh->b_private;

	bh->b_private = NULL;
	bh->b_end_io = private->b_end_io;
	bh->b_end_io(bh, uptodate);
	kfree(private);
}

/* The make_request_fn that was installed before this module hooked the queue. */
static int (*original_make_request_fn)(request_queue_t *q, int rw, struct buffer_head *bh);

/* Allocate the per-request state, yielding the CPU until the allocation succeeds. */
static pmy_b_end_io kti_get_private(void)
{
	pmy_b_end_io ptr = NULL;

	while (!ptr) {
		ptr = kmalloc(sizeof(my_b_end_io), GFP_NOIO);
		if (!ptr) {
			__set_current_state(TASK_RUNNING);
			current->policy |= SCHED_YIELD;
			schedule();
		}
	}
	return ptr;
}

static int kti_make_request_fn(request_queue_t *q, int rw, struct buffer_head *bh)
{
	pmy_b_end_io private;

	printk("In kti_make_request_fn\n");

	/* Hook the completion path so the request is seen again in kti_b_end_io(). */
	private = kti_get_private();
	private->bh = bh;
	private->b_end_io = bh->b_end_io;
	bh->b_private = private;
	bh->b_end_io = kti_b_end_io;

	switch (rw) {
	case READA:
	case READ:
		printk("In kti_make_request_fn read\n");
		/* ret = brw_kiovec(READ, 1, &iobuf, old_dev,
		 *                  iobuf->blocks, old_blksize); */
		break;

	case WRITE: {
		struct kiobuf *iobuf;
		char *buf = "ABCDEF";			/* bh->b_data */
		int new_blksize = bh->b_size;
		unsigned long blocknr[1] = { 0 };	/* block 0 of the target device (placeholder) */
		kdev_t new_dev = MKDEV(253, 0);
		int count = 6;				/* new_blksize */
		int result;
		int ret;

		printk("In kti_make_request_fn write\n");

		/* Allocate an I/O vector. */
		result = alloc_kiovec(1, &iobuf);
		if (result)
			break;	/* skip the mirror write, pass the request on unchanged */

		/* Map the buffer and do the I/O.  Note: map_user_kiobuf() expects a
		 * user-space address; mapping a kernel string literal like this is
		 * experimental and is likely to fail. */
		result = map_user_kiobuf(rw, iobuf, (unsigned long) buf, count);
		if (result) {
			free_kiovec(1, &iobuf);
			break;
		}

		down(&dev_lock);
		ret = brw_kiovec(WRITE, 1, &iobuf, new_dev, blocknr, new_blksize);
		up(&dev_lock);

		/* Clean up. */
		unmap_kiobuf(iobuf);
		free_kiovec(1, &iobuf);

		/* bh->b_dev = MKDEV(253, 0);
		 * atomic_set(&bh->b_count, 1);
		 * printk("before submit sbull\n");
		 * blk_dev[253].request_queue.make_request_fn(q, rw, bh);
		 * submit_bh(WRITE, bh);
		 * printk("after submit sbull\n"); */
		break;
	}
	}

	/* Always hand the original request on to the driver's own make_request_fn. */
	return original_make_request_fn(q, rw, bh);
}

int init_module(void)
{
	printk("In sbull module start\n");
	spin_lock_irq(&io_request_lock);
	original_make_request_fn = blk_dev[254].request_queue.make_request_fn;
	blk_dev[254].request_queue.make_request_fn = kti_make_request_fn;
	spin_unlock_irq(&io_request_lock);
	printk("In sbull module end\n");
	return 0;
}

void cleanup_module(void)
{
	printk("Clean sbull module start\n");
	spin_lock_irq(&io_request_lock);
	blk_dev[254].request_queue.make_request_fn = original_make_request_fn;
	spin_unlock_irq(&io_request_lock);
	printk("Clean sbull module end\n");
}
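/*
 * Build/usage sketch.  The source file name (kti.c), the kernel tree location
 * (/usr/src/linux), and the 2.4-series build flags are assumptions made here
 * for illustration; they are not stated in the code above.
 *
 *   gcc -O2 -Wall -DMODULE -D__KERNEL__ -I/usr/src/linux/include -c kti.c -o kti.o
 *   insmod kti.o
 *   rmmod kti
 *
 * The -DMODULE and -D__KERNEL__ flags duplicate the #defines at the top of the
 * file and are harmless.  Hooking blk_dev[254] only has an effect once a
 * driver (e.g. the sbull device named in the printk messages) has registered a
 * request queue under that major number.
 */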