From: Jens Axboe
Date: Thu, 1 Sep 2005 11:11:25 +0000 (+0200)
Subject: [PATCH] Update kernel patch to 2.6.13-mm1
X-Git-Tag: blktrace-0.99~227
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=56c7d54ddfb807b769bf42d5b6ba4d34283f6cc0;p=blktrace.git

[PATCH] Update kernel patch to 2.6.13-mm1
---

diff --git a/README b/README
index 6167b84..3c323dd 100644
--- a/README
+++ b/README
@@ -8,19 +8,11 @@ Alan D. Brunelle (threading and splitup into two seperate programs).
 Requirements
 ------------
 
-Currently, the kernel support patch isn't merged yet. The last posted patch
-is:
-
-http://marc.theaimsgroup.com/?l=linux-kernel&m=112487579802074&w=2
-
-and applies against 2.6.13-rc6-mm2. The patch is also included in the git
-repository you pulled to get this file:
+You need to be running a 2.6.13-mm1 kernel with the blk-trace patch
+included in this repository. If you forgot where you got it, the url is:
 
 rsync://rsync.kernel.org/pub/scm/linux/kernel/git/axboe/blktrace.git
 
-Additionally, you need to apply the relayfs-read update patch also located
-in the kernel/ directory.
-
 Usage
 -----
 
diff --git a/kernel/blk-trace-2.6.13-mm1-A0 b/kernel/blk-trace-2.6.13-mm1-A0
new file mode 100644
index 0000000..70e7356
--- /dev/null
+++ b/kernel/blk-trace-2.6.13-mm1-A0
@@ -0,0 +1,537 @@
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/blktrace.c linux-2.6.13-rc6-mm2/drivers/block/blktrace.c
+--- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/blktrace.c	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.13-rc6-mm2/drivers/block/blktrace.c	2005-08-30 20:56:43.000000000 +0200
+@@ -0,0 +1,184 @@
++#include <linux/config.h>
++#include <linux/kernel.h>
++#include <linux/blkdev.h>
++#include <linux/blktrace.h>
++#include <linux/percpu.h>
++#include <linux/init.h>
++#include <asm/uaccess.h>
++
++static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
++
++static inline unsigned long long blk_trace_cpu_time(void)
++{
++	unsigned long long offset;
++
++	offset = sched_clock() - per_cpu(blk_trace_cpu_offset, get_cpu());
++	put_cpu();
++	return offset;
++}
++
++void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
++		     int rw, u32 what, int error, int pdu_len, char *pdu_data)
++{
++	struct blk_io_trace t;
++	unsigned long flags;
++
++	if (rw & (1 << BIO_RW_BARRIER))
++		what |= BLK_TC_ACT(BLK_TC_BARRIER);
++	if (rw & (1 << BIO_RW_SYNC))
++		what |= BLK_TC_ACT(BLK_TC_SYNC);
++
++	if (rw & WRITE)
++		what |= BLK_TC_ACT(BLK_TC_WRITE);
++	else
++		what |= BLK_TC_ACT(BLK_TC_READ);
++
++	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
++		return;
++
++	t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
++	t.sequence = atomic_add_return(1, &bt->sequence);
++	t.time = blk_trace_cpu_time();
++	t.sector = sector;
++	t.bytes = bytes;
++	t.action = what;
++	t.pid = current->pid;
++	t.error = error;
++	t.pdu_len = pdu_len;
++
++	local_irq_save(flags);
++	__relay_write(bt->rchan, &t, sizeof(t));
++	if (pdu_len)
++		__relay_write(bt->rchan, pdu_data, pdu_len);
++	local_irq_restore(flags);
++}
++
++int blk_stop_trace(struct block_device *bdev)
++{
++	request_queue_t *q = bdev_get_queue(bdev);
++	struct blk_trace *bt = NULL;
++	int ret = -EINVAL;
++
++	if (!q)
++		return -ENXIO;
++
++	down(&bdev->bd_sem);
++
++	spin_lock_irq(q->queue_lock);
++	if (q->blk_trace) {
++		bt = q->blk_trace;
++		q->blk_trace = NULL;
++		ret = 0;
++	}
++	spin_unlock_irq(q->queue_lock);
++
++	up(&bdev->bd_sem);
++
++	if (bt) {
++		relay_close(bt->rchan);
++		kfree(bt);
++	}
++
++	return ret;
++}
++
++int blk_start_trace(struct block_device *bdev, char __user *arg)
++{
++	request_queue_t *q = bdev_get_queue(bdev);
++	struct blk_user_trace_setup buts;
++	struct blk_trace *bt;
++	char b[BDEVNAME_SIZE];
++	int ret = 0;
++
++	if (!q)
++		return -ENXIO;
++
++	if (copy_from_user(&buts, arg, sizeof(buts)))
++		return -EFAULT;
++
++	if (!buts.buf_size || !buts.buf_nr)
++		return -EINVAL;
++
++	strcpy(buts.name, bdevname(bdev, b));
++
++	if (copy_to_user(arg, &buts, sizeof(buts)))
++		return -EFAULT;
++
++	down(&bdev->bd_sem);
++	ret = -EBUSY;
++	if (q->blk_trace)
++		goto err;
++
++	ret = -ENOMEM;
++	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
++	if (!bt)
++		goto err;
++
++	atomic_set(&bt->sequence, 0);
++
++	bt->rchan = relay_open(bdevname(bdev, b), NULL, buts.buf_size,
++			       buts.buf_nr, NULL);
++	ret = -EIO;
++	if (!bt->rchan)
++		goto err;
++
++	bt->act_mask = buts.act_mask;
++	if (!bt->act_mask)
++		bt->act_mask = (u16) -1;
++
++	spin_lock_irq(q->queue_lock);
++	q->blk_trace = bt;
++	spin_unlock_irq(q->queue_lock);
++	ret = 0;
++err:
++	up(&bdev->bd_sem);
++	return ret;
++}
++
++static void blk_trace_check_cpu_time(void *data)
++{
++	unsigned long long a, b, *t;
++	struct timeval tv;
++	int cpu = get_cpu();
++
++	t = &per_cpu(blk_trace_cpu_offset, cpu);
++
++	a = sched_clock();
++	do_gettimeofday(&tv);
++	b = sched_clock();
++
++	*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
++	*t -= (a + b) / 2;
++	put_cpu();
++}
++
++static int blk_trace_calibrate_offsets(void)
++{
++	int cpus = num_online_cpus();
++	unsigned long flags;
++	long long *time_offset;
++
++	time_offset = kmalloc(cpus * sizeof(long long), GFP_KERNEL);
++	if (!time_offset) {
++		printk(KERN_ERR "blktrace: time offsets will be unreliable\n");
++		return -ENOMEM;
++	}
++
++	memset(time_offset, 0, cpus * sizeof(long long));
++
++	smp_call_function(blk_trace_check_cpu_time, time_offset, 1, 1);
++	local_irq_save(flags);
++	blk_trace_check_cpu_time(time_offset);
++	local_irq_restore(flags);
++
++	kfree(time_offset);
++	return 0;
++}
++
++static __init int blk_trace_init(void)
++{
++	return blk_trace_calibrate_offsets();
++}
++
++module_init(blk_trace_init);
++
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/elevator.c linux-2.6.13-rc6-mm2/drivers/block/elevator.c
+--- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/elevator.c	2005-08-30 20:55:03.000000000 +0200
++++ linux-2.6.13-rc6-mm2/drivers/block/elevator.c	2005-08-26 11:03:38.000000000 +0200
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <linux/init.h>
+ #include <linux/compiler.h>
++#include <linux/blktrace.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -371,6 +372,9 @@ struct request *elv_next_request(request
+ 	int ret;
+ 
+ 	while ((rq = __elv_next_request(q)) != NULL) {
++
++		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
++
+ 		/*
+ 		 * just mark as started even if we don't start it, a request
+ 		 * that has been delayed should not be passed by new incoming
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ioctl.c linux-2.6.13-rc6-mm2/drivers/block/ioctl.c
+--- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ioctl.c	2005-08-30 20:55:03.000000000 +0200
++++ linux-2.6.13-rc6-mm2/drivers/block/ioctl.c	2005-08-26 11:03:38.000000000 +0200
+@@ -4,6 +4,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/buffer_head.h>
+ #include <linux/smp_lock.h>
++#include <linux/blktrace.h>
+ #include <asm/uaccess.h>
+ 
+ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+@@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
+ 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
+ 	case BLKGETSIZE64:
+ 		return put_u64(arg, bdev->bd_inode->i_size);
++	case BLKSTARTTRACE:
++		return blk_start_trace(bdev, (char __user *) arg);
++	case BLKSTOPTRACE:
++		return blk_stop_trace(bdev);
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Kconfig linux-2.6.13-rc6-mm2/drivers/block/Kconfig
+--- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Kconfig	2005-08-30 20:55:03.000000000 +0200
++++ linux-2.6.13-rc6-mm2/drivers/block/Kconfig	2005-08-29 08:00:42.000000000 +0200
+@@ -419,6 +419,14 @@ config LBD
+ 	  your machine, or if you want to have a raid or loopback device
+ 	  bigger than 2TB. Otherwise say N.
+ 
++config BLK_DEV_IO_TRACE
++	bool "Support for tracing block io actions"
++	select RELAYFS_FS
++	help
++	  Say Y here, if you want to be able to trace the block layer actions
++	  on a given queue.
++
++
+ config CDROM_PKTCDVD
+ 	tristate "Packet writing on CD/DVD media"
+ 	depends on !UML
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c
+--- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c	2005-08-30 20:55:03.000000000 +0200
++++ linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c	2005-08-26 11:03:38.000000000 +0200
+@@ -29,6 +29,7 @@
+ #include <linux/slab.h>
+ #include <linux/swap.h>
+ #include <linux/writeback.h>
++#include <linux/blktrace.h>
+ 
+ /*
+  * for max sense size
+@@ -1625,6 +1626,12 @@ void blk_cleanup_queue(request_queue_t *
+ 	if (q->queue_tags)
+ 		__blk_queue_free_tags(q);
+ 
++	if (q->blk_trace) {
++		relay_close(q->blk_trace->rchan);
++		kfree(q->blk_trace);
++		q->blk_trace = NULL;
++	}
++
+ 	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
+ 
+ 	kmem_cache_free(requestq_cachep, q);
+@@ -1971,6 +1978,8 @@ rq_starved:
+ 
+ 	rq_init(q, rq);
+ 	rq->rl = rl;
++
++	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+ out:
+ 	return rq;
+ }
+@@ -1999,6 +2008,8 @@ static struct request *get_request_wait(
+ 	if (!rq) {
+ 		struct io_context *ioc;
+ 
++		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
++
+ 		__generic_unplug_device(q);
+ 		spin_unlock_irq(q->queue_lock);
+ 		io_schedule();
+@@ -2052,6 +2063,8 @@ EXPORT_SYMBOL(blk_get_request);
+  */
+ void blk_requeue_request(request_queue_t *q, struct request *rq)
+ {
++	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
++
+ 	if (blk_rq_tagged(rq))
+ 		blk_queue_end_tag(q, rq);
+ 
+@@ -2665,6 +2678,8 @@ static int __make_request(request_queue_
+ 			if (!q->back_merge_fn(q, req, bio))
+ 				break;
+ 
++			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
++
+ 			req->biotail->bi_next = bio;
+ 			req->biotail = bio;
+ 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+@@ -2680,6 +2695,8 @@ static int __make_request(request_queue_
+ 			if (!q->front_merge_fn(q, req, bio))
+ 				break;
+ 
++			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
++
+ 			bio->bi_next = req->bio;
+ 			req->bio = bio;
+ 
+@@ -2705,6 +2722,8 @@ static int __make_request(request_queue_
+ 	}
+ 
+ get_rq:
++	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
++
+ 	/*
+ 	 * Grab a free request. This is might sleep but can not fail.
+ 	 * Returns with the queue unlocked.
+@@ -2981,6 +3000,10 @@ end_io:
+ 		blk_partition_remap(bio);
+ 
+ 		ret = q->make_request_fn(q, bio);
++
++		if (ret)
++			blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
++
+ 	} while (ret);
+ }
+ 
+@@ -3099,6 +3122,8 @@ static int __end_that_request_first(stru
+ 	int total_bytes, bio_nbytes, error, next_idx = 0;
+ 	struct bio *bio;
+ 
++	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
++
+ 	/*
+ 	 * extend uptodate bool to allow < 0 value to be direct io error
+ 	 */
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Makefile linux-2.6.13-rc6-mm2/drivers/block/Makefile
+--- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Makefile	2005-08-30 20:55:03.000000000 +0200
++++ linux-2.6.13-rc6-mm2/drivers/block/Makefile	2005-08-26 11:03:38.000000000 +0200
+@@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD)		+= viodasd.o
+ obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
+ obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
+ 
++obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
++
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blkdev.h linux-2.6.13-rc6-mm2/include/linux/blkdev.h
+--- /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blkdev.h	2005-08-30 20:55:03.000000000 +0200
++++ linux-2.6.13-rc6-mm2/include/linux/blkdev.h	2005-08-26 11:03:38.000000000 +0200
+@@ -22,6 +22,7 @@ typedef struct request_queue request_que
+ struct elevator_queue;
+ typedef struct elevator_queue elevator_t;
+ struct request_pm_state;
++struct blk_trace;
+ 
+ #define BLKDEV_MIN_RQ	4
+ #define BLKDEV_MAX_RQ	128	/* Default maximum */
+@@ -412,6 +413,8 @@ struct request_queue
+ 	 */
+ 	struct request		*flush_rq;
+ 	unsigned char		ordered;
++
++	struct blk_trace	*blk_trace;
+ };
+ 
+ enum {
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blktrace.h linux-2.6.13-rc6-mm2/include/linux/blktrace.h
+--- /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blktrace.h	1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.13-rc6-mm2/include/linux/blktrace.h	2005-08-30 09:57:59.000000000 +0200
+@@ -0,0 +1,145 @@
++#ifndef BLKTRACE_H
++#define BLKTRACE_H
++
++#include <linux/config.h>
++#include <linux/blkdev.h>
++#include <linux/relayfs_fs.h>
++
++/*
++ * Trace categories
++ */
++enum {
++	BLK_TC_READ	= 1 << 0,	/* reads */
++	BLK_TC_WRITE	= 1 << 1,	/* writes */
++	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
++	BLK_TC_SYNC	= 1 << 3,	/* barrier */
++	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
++	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
++	BLK_TC_ISSUE	= 1 << 6,	/* issue */
++	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
++	BLK_TC_FS	= 1 << 8,	/* fs requests */
++	BLK_TC_PC	= 1 << 9,	/* pc requests */
++
++	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
++};
++
++#define BLK_TC_SHIFT	(16)
++#define BLK_TC_ACT(act)	((act) << BLK_TC_SHIFT)
++
++/*
++ * Basic trace actions
++ */
++enum {
++	__BLK_TA_QUEUE = 1,	/* queued */
++	__BLK_TA_BACKMERGE,	/* back merged to existing rq */
++	__BLK_TA_FRONTMERGE,	/* front merge to existing rq */
++	__BLK_TA_GETRQ,		/* allocated new request */
++	__BLK_TA_SLEEPRQ,	/* sleeping on rq allocation */
++	__BLK_TA_REQUEUE,	/* request requeued */
++	__BLK_TA_ISSUE,		/* sent to driver */
++	__BLK_TA_COMPLETE,	/* completed by driver */
++};
++
++/*
++ * Trace actions in full. Additionally, read or write is masked
++ */
++#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
++#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
++#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
++#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
++
++#define BLK_IO_TRACE_MAGIC	0x65617400
++#define BLK_IO_TRACE_VERSION	0x02
++
++/*
++ * The trace itself
++ */
++struct blk_io_trace {
++	u32 magic;	/* MAGIC << 8 | version */
++	u32 sequence;	/* event number */
++	u64 time;	/* in microseconds */
++	u64 sector;	/* disk offset */
++	u32 bytes;	/* transfer length */
++	u32 action;	/* what happened */
++	u32 pid;	/* who did it */
++	u16 error;	/* completion error */
++	u16 pdu_len;	/* length of data after this trace */
++};
++
++struct blk_trace {
++	struct rchan *rchan;
++	atomic_t sequence;
++	u16 act_mask;
++};
++
++/*
++ * User setup structure passed with BLKSTARTTRACE
++ */
++struct blk_user_trace_setup {
++	char name[BDEVNAME_SIZE];	/* output */
++	u16 act_mask;			/* input */
++	u32 buf_size;			/* input */
++	u32 buf_nr;			/* input */
++};
++
++#if defined(CONFIG_BLK_DEV_IO_TRACE)
++extern int blk_start_trace(struct block_device *, char __user *);
++extern int blk_stop_trace(struct block_device *);
++extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, char *);
++
++static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
++				    u32 what)
++{
++	struct blk_trace *bt = q->blk_trace;
++	int rw = rq->flags & 0x07;
++
++	if (likely(!bt))
++		return;
++
++	if (blk_pc_request(rq)) {
++		what |= BLK_TC_ACT(BLK_TC_PC);
++		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
++	} else {
++		what |= BLK_TC_ACT(BLK_TC_FS);
++		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
++	}
++}
++
++static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
++				     u32 what)
++{
++	struct blk_trace *bt = q->blk_trace;
++
++	if (likely(!bt))
++		return;
++
++	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
++}
++
++static inline void blk_add_trace_generic(struct request_queue *q,
++					 struct bio *bio, int rw, u32 what)
++{
++	struct blk_trace *bt = q->blk_trace;
++
++	if (likely(!bt))
++		return;
++
++	if (bio)
++		blk_add_trace_bio(q, bio, what);
++	else
++		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
++}
++
++#else /* !CONFIG_BLK_DEV_IO_TRACE */
++#define blk_start_trace(bdev, arg)		(-EINVAL)
++#define blk_stop_trace(bdev)			(-EINVAL)
++#define blk_add_trace_rq(q, rq, what)		do { } while (0)
++#define blk_add_trace_bio(q, rq, what)		do { } while (0)
++#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
++#endif /* CONFIG_BLK_DEV_IO_TRACE */
++
++#endif
+diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/fs.h linux-2.6.13-rc6-mm2/include/linux/fs.h
+--- /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/fs.h	2005-08-30 20:55:03.000000000 +0200
++++ linux-2.6.13-rc6-mm2/include/linux/fs.h	2005-08-26 11:03:38.000000000 +0200
+@@ -196,6 +196,8 @@ extern int dir_notify_enable;
+ #define BLKBSZGET  _IOR(0x12,112,size_t)
+ #define BLKBSZSET  _IOW(0x12,113,size_t)
+ #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
++#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
++#define BLKSTOPTRACE _IO(0x12,116)
+ 
+ #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
+ #define FIBMAP	   _IO(0x00,1)	/* bmap access */
diff --git a/kernel/blk-trace-2.6.13-rc6-mm2-B1 b/kernel/blk-trace-2.6.13-rc6-mm2-B1
deleted file mode 100644
index 9a58fbb..0000000
--- a/kernel/blk-trace-2.6.13-rc6-mm2-B1
+++ /dev/null
@@ -1,477 +0,0 @@
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Kconfig linux-2.6.13-rc6-mm2/drivers/block/Kconfig
---- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Kconfig	2005-08-24 13:17:28.000000000 +0200
-+++ linux-2.6.13-rc6-mm2/drivers/block/Kconfig	2005-08-24 11:52:14.000000000 +0200
-@@ -419,6 +419,14 @@ config LBD
- 	  your machine, or if you want to have a raid or loopback device
- 	  bigger than 2TB. Otherwise say N.
- 
-+config BLK_DEV_IO_TRACE
-+	bool "Support for tracing block io actions"
-+	select RELAYFS_FS
-+	help
-+	  Say Y here, if you want to be able to trace the block layer actions
-+	  on a given queue.
-+
-+
- config CDROM_PKTCDVD
- 	tristate "Packet writing on CD/DVD media"
- 	depends on !UML
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Makefile linux-2.6.13-rc6-mm2/drivers/block/Makefile
---- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/Makefile	2005-08-07 20:18:56.000000000 +0200
-+++ linux-2.6.13-rc6-mm2/drivers/block/Makefile	2005-08-24 11:52:14.000000000 +0200
-@@ -45,3 +45,5 @@ obj-$(CONFIG_VIODASD)		+= viodasd.o
- obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
- obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
- 
-+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
-+
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/blktrace.c linux-2.6.13-rc6-mm2/drivers/block/blktrace.c
---- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/blktrace.c	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.13-rc6-mm2/drivers/block/blktrace.c	2005-08-24 13:22:11.000000000 +0200
-@@ -0,0 +1,124 @@
-+#include <linux/config.h>
-+#include <linux/kernel.h>
-+#include <linux/blkdev.h>
-+#include <linux/blktrace.h>
-+#include <asm/uaccess.h>
-+
-+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
-+		     int rw, u32 what, int error, int pdu_len, char *pdu_data)
-+{
-+	struct blk_io_trace t;
-+	unsigned long flags;
-+
-+	if (rw & (1 << BIO_RW_BARRIER))
-+		what |= BLK_TC_ACT(BLK_TC_BARRIER);
-+	if (rw & (1 << BIO_RW_SYNC))
-+		what |= BLK_TC_ACT(BLK_TC_SYNC);
-+
-+	if (rw & WRITE)
-+		what |= BLK_TC_ACT(BLK_TC_WRITE);
-+	else
-+		what |= BLK_TC_ACT(BLK_TC_READ);
-+
-+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
-+		return;
-+
-+	t.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
-+	t.sequence = atomic_add_return(1, &bt->sequence);
-+	t.time = sched_clock();
-+	t.sector = sector;
-+	t.bytes = bytes;
-+	t.action = what;
-+	t.pid = current->pid;
-+	t.error = error;
-+	t.pdu_len = pdu_len;
-+
-+	local_irq_save(flags);
-+	__relay_write(bt->rchan, &t, sizeof(t));
-+	if (pdu_len)
-+		__relay_write(bt->rchan, pdu_data, pdu_len);
-+	local_irq_restore(flags);
-+}
-+
-+int blk_stop_trace(struct block_device *bdev)
-+{
-+	request_queue_t *q = bdev_get_queue(bdev);
-+	struct blk_trace *bt = NULL;
-+	int ret = -EINVAL;
-+
-+	if (!q)
-+		return -ENXIO;
-+
-+	down(&bdev->bd_sem);
-+
-+	spin_lock_irq(q->queue_lock);
-+	if (q->blk_trace) {
-+		bt = q->blk_trace;
-+		q->blk_trace = NULL;
-+		ret = 0;
-+	}
-+	spin_unlock_irq(q->queue_lock);
-+
-+	up(&bdev->bd_sem);
-+
-+	if (bt) {
-+		relay_close(bt->rchan);
-+		kfree(bt);
-+	}
-+
-+	return ret;
-+}
-+
-+int blk_start_trace(struct block_device *bdev, char __user *arg)
-+{
-+	request_queue_t *q = bdev_get_queue(bdev);
-+	struct blk_user_trace_setup buts;
-+	struct blk_trace *bt;
-+	char b[BDEVNAME_SIZE];
-+	int ret = 0;
-+
-+	if (!q)
-+		return -ENXIO;
-+
-+	if (copy_from_user(&buts, arg, sizeof(buts)))
-+		return -EFAULT;
-+
-+	if (!buts.buf_size || !buts.buf_nr)
-+		return -EINVAL;
-+
-+	strcpy(buts.name, bdevname(bdev, b));
-+
-+	if (copy_to_user(arg, &buts, sizeof(buts)))
-+		return -EFAULT;
-+
-+	down(&bdev->bd_sem);
-+	ret = -EBUSY;
-+	if (q->blk_trace)
-+		goto err;
-+
-+	ret = -ENOMEM;
-+	bt = kmalloc(sizeof(*bt), GFP_KERNEL);
-+	if (!bt)
-+		goto err;
-+
-+	atomic_set(&bt->sequence, 0);
-+
-+	bt->rchan = relay_open(bdevname(bdev, b), NULL, buts.buf_size,
-+			       buts.buf_nr, NULL);
-+	ret = -EIO;
-+	if (!bt->rchan)
-+		goto err;
-+
-+	bt->act_mask = buts.act_mask;
-+	if (!bt->act_mask)
-+		bt->act_mask = (u16) -1;
-+
-+	spin_lock_irq(q->queue_lock);
-+	q->blk_trace = bt;
-+	spin_unlock_irq(q->queue_lock);
-+	ret = 0;
-+err:
-+	up(&bdev->bd_sem);
-+	return ret;
-+}
-+
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/elevator.c linux-2.6.13-rc6-mm2/drivers/block/elevator.c
---- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/elevator.c	2005-08-07 20:18:56.000000000 +0200
-+++ linux-2.6.13-rc6-mm2/drivers/block/elevator.c	2005-08-24 11:52:14.000000000 +0200
-@@ -34,6 +34,7 @@
- #include <linux/slab.h>
- #include <linux/init.h>
- #include <linux/compiler.h>
-+#include <linux/blktrace.h>
- 
- #include <asm/uaccess.h>
- 
-@@ -371,6 +372,9 @@ struct request *elv_next_request(request
- 	int ret;
- 
- 	while ((rq = __elv_next_request(q)) != NULL) {
-+
-+		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
-+
- 		/*
- 		 * just mark as started even if we don't start it, a request
- 		 * that has been delayed should not be passed by new incoming
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ioctl.c linux-2.6.13-rc6-mm2/drivers/block/ioctl.c
---- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ioctl.c	2005-08-07 20:18:56.000000000 +0200
-+++ linux-2.6.13-rc6-mm2/drivers/block/ioctl.c	2005-08-24 11:52:14.000000000 +0200
-@@ -4,6 +4,7 @@
- #include <linux/backing-dev.h>
- #include <linux/buffer_head.h>
- #include <linux/smp_lock.h>
-+#include <linux/blktrace.h>
- #include <asm/uaccess.h>
- 
- static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
-@@ -188,6 +189,10 @@ static int blkdev_locked_ioctl(struct fi
- 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
- 	case BLKGETSIZE64:
- 		return put_u64(arg, bdev->bd_inode->i_size);
-+	case BLKSTARTTRACE:
-+		return blk_start_trace(bdev, (char __user *) arg);
-+	case BLKSTOPTRACE:
-+		return blk_stop_trace(bdev);
- 	}
- 	return -ENOIOCTLCMD;
- }
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c
---- /opt/kernel/linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c	2005-08-24 13:17:28.000000000 +0200
-+++ linux-2.6.13-rc6-mm2/drivers/block/ll_rw_blk.c	2005-08-24 11:52:14.000000000 +0200
-@@ -29,6 +29,7 @@
- #include <linux/slab.h>
- #include <linux/swap.h>
- #include <linux/writeback.h>
-+#include <linux/blktrace.h>
- 
- /*
-  * for max sense size
-@@ -1625,6 +1626,12 @@ void blk_cleanup_queue(request_queue_t *
- 	if (q->queue_tags)
- 		__blk_queue_free_tags(q);
- 
-+	if (q->blk_trace) {
-+		relay_close(q->blk_trace->rchan);
-+		kfree(q->blk_trace);
-+		q->blk_trace = NULL;
-+	}
-+
- 	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
- 
- 	kmem_cache_free(requestq_cachep, q);
-@@ -1971,6 +1978,8 @@ rq_starved:
- 
- 	rq_init(q, rq);
- 	rq->rl = rl;
-+
-+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
- out:
- 	return rq;
- }
-@@ -1999,6 +2008,8 @@ static struct request *get_request_wait(
- 	if (!rq) {
- 		struct io_context *ioc;
- 
-+		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
-+
- 		__generic_unplug_device(q);
- 		spin_unlock_irq(q->queue_lock);
- 		io_schedule();
-@@ -2052,6 +2063,8 @@ EXPORT_SYMBOL(blk_get_request);
-  */
- void blk_requeue_request(request_queue_t *q, struct request *rq)
- {
-+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
-+
- 	if (blk_rq_tagged(rq))
- 		blk_queue_end_tag(q, rq);
- 
-@@ -2665,6 +2678,8 @@ static int __make_request(request_queue_
- 			if (!q->back_merge_fn(q, req, bio))
- 				break;
- 
-+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
-+
- 			req->biotail->bi_next = bio;
- 			req->biotail = bio;
- 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-@@ -2680,6 +2695,8 @@ static int __make_request(request_queue_
- 			if (!q->front_merge_fn(q, req, bio))
- 				break;
- 
-+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
-+
- 			bio->bi_next = req->bio;
- 			req->bio = bio;
- 
-@@ -2705,6 +2722,8 @@ static int __make_request(request_queue_
- 	}
- 
- get_rq:
-+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
-+
- 	/*
- 	 * Grab a free request. This is might sleep but can not fail.
- 	 * Returns with the queue unlocked.
-@@ -2981,6 +3000,10 @@ end_io:
- 		blk_partition_remap(bio);
- 
- 		ret = q->make_request_fn(q, bio);
-+
-+		if (ret)
-+			blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
-+
- 	} while (ret);
- }
- 
-@@ -3099,6 +3122,8 @@ static int __end_that_request_first(stru
- 	int total_bytes, bio_nbytes, error, next_idx = 0;
- 	struct bio *bio;
- 
-+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
-+
- 	/*
- 	 * extend uptodate bool to allow < 0 value to be direct io error
- 	 */
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blkdev.h linux-2.6.13-rc6-mm2/include/linux/blkdev.h
---- /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blkdev.h	2005-08-24 13:17:35.000000000 +0200
-+++ linux-2.6.13-rc6-mm2/include/linux/blkdev.h	2005-08-24 11:52:14.000000000 +0200
-@@ -22,6 +22,7 @@ typedef struct request_queue request_que
- struct elevator_queue;
- typedef struct elevator_queue elevator_t;
- struct request_pm_state;
-+struct blk_trace;
- 
- #define BLKDEV_MIN_RQ	4
- #define BLKDEV_MAX_RQ	128	/* Default maximum */
-@@ -412,6 +413,8 @@ struct request_queue
- 	 */
- 	struct request		*flush_rq;
- 	unsigned char		ordered;
-+
-+	struct blk_trace	*blk_trace;
- };
- 
- enum {
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blktrace.h linux-2.6.13-rc6-mm2/include/linux/blktrace.h
---- /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/blktrace.h	1970-01-01 01:00:00.000000000 +0100
-+++ linux-2.6.13-rc6-mm2/include/linux/blktrace.h	2005-08-24 13:22:24.000000000 +0200
-@@ -0,0 +1,145 @@
-+#ifndef BLKTRACE_H
-+#define BLKTRACE_H
-+
-+#include <linux/config.h>
-+#include <linux/blkdev.h>
-+#include <linux/relayfs_fs.h>
-+
-+/*
-+ * Trace categories
-+ */
-+enum {
-+	BLK_TC_READ	= 1 << 0,	/* reads */
-+	BLK_TC_WRITE	= 1 << 1,	/* writes */
-+	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
-+	BLK_TC_SYNC	= 1 << 3,	/* barrier */
-+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
-+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
-+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
-+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
-+	BLK_TC_FS	= 1 << 8,	/* fs requests */
-+	BLK_TC_PC	= 1 << 9,	/* pc requests */
-+
-+	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
-+};
-+
-+#define BLK_TC_SHIFT	(16)
-+#define BLK_TC_ACT(act)	((act) << BLK_TC_SHIFT)
-+
-+/*
-+ * Basic trace actions
-+ */
-+enum {
-+	__BLK_TA_QUEUE = 1,	/* queued */
-+	__BLK_TA_BACKMERGE,	/* back merged to existing rq */
-+	__BLK_TA_FRONTMERGE,	/* front merge to existing rq */
-+	__BLK_TA_GETRQ,		/* allocated new request */
-+	__BLK_TA_SLEEPRQ,	/* sleeping on rq allocation */
-+	__BLK_TA_REQUEUE,	/* request requeued */
-+	__BLK_TA_ISSUE,		/* sent to driver */
-+	__BLK_TA_COMPLETE,	/* completed by driver */
-+};
-+
-+/*
-+ * Trace actions in full. Additionally, read or write is masked
-+ */
-+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
-+#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
-+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
-+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE| BLK_TC_ACT(BLK_TC_COMPLETE))
-+
-+#define BLK_IO_TRACE_MAGIC	0x65617400
-+#define BLK_IO_TRACE_VERSION	0x02
-+
-+/*
-+ * The trace itself
-+ */
-+struct blk_io_trace {
-+	u32 magic;	/* MAGIC << 8 | version */
-+	u32 sequence;	/* event number */
-+	u64 time;	/* in microseconds */
-+	u64 sector;	/* disk offset */
-+	u32 bytes;	/* transfer length */
-+	u32 action;	/* what happened */
-+	u32 pid;	/* who did it */
-+	u16 error;	/* completion error */
-+	u16 pdu_len;	/* length of data after this trace */
-+};
-+
-+struct blk_trace {
-+	struct rchan *rchan;
-+	atomic_t sequence;
-+	u16 act_mask;
-+};
-+
-+/*
-+ * User setup structure passed with BLKSTARTTRACE
-+ */
-+struct blk_user_trace_setup {
-+	char name[BDEVNAME_SIZE];	/* output */
-+	u16 act_mask;			/* input */
-+	u32 buf_size;			/* input */
-+	u32 buf_nr;			/* input */
-+};
-+
-+#if defined(CONFIG_BLK_DEV_IO_TRACE)
-+extern int blk_start_trace(struct block_device *, char __user *);
-+extern int blk_stop_trace(struct block_device *);
-+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, char *);
-+
-+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-+				    u32 what)
-+{
-+	struct blk_trace *bt = q->blk_trace;
-+	int rw = rq->flags & 0x07;
-+
-+	if (likely(!bt))
-+		return;
-+
-+	if (blk_pc_request(rq)) {
-+		what |= BLK_TC_ACT(BLK_TC_PC);
-+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
-+	} else {
-+		what |= BLK_TC_ACT(BLK_TC_FS);
-+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
-+	}
-+}
-+
-+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-+				     u32 what)
-+{
-+	struct blk_trace *bt = q->blk_trace;
-+
-+	if (likely(!bt))
-+		return;
-+
-+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-+}
-+
-+static inline void blk_add_trace_generic(struct request_queue *q,
-+					 struct bio *bio, int rw, u32 what)
-+{
-+	struct blk_trace *bt = q->blk_trace;
-+
-+	if (likely(!bt))
-+		return;
-+
-+	if (bio)
-+		blk_add_trace_bio(q, bio, what);
-+	else
-+		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
-+}
-+
-+#else /* !CONFIG_BLK_DEV_IO_TRACE */
-+#define blk_start_trace(bdev, arg)		(-EINVAL)
-+#define blk_stop_trace(bdev)			(-EINVAL)
-+#define blk_add_trace_rq(q, rq, what)		do { } while (0)
-+#define blk_add_trace_bio(q, rq, what)		do { } while (0)
-+#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
-+#endif /* CONFIG_BLK_DEV_IO_TRACE */
-+
-+#endif
-diff -urpN -X linux-2.6.13-rc6-mm2/Documentation/dontdiff /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/fs.h linux-2.6.13-rc6-mm2/include/linux/fs.h
---- /opt/kernel/linux-2.6.13-rc6-mm2/include/linux/fs.h	2005-08-24 13:17:35.000000000 +0200
-+++ linux-2.6.13-rc6-mm2/include/linux/fs.h	2005-08-24 11:52:14.000000000 +0200
-@@ -196,6 +196,8 @@ extern int dir_notify_enable;
- #define BLKBSZGET  _IOR(0x12,112,size_t)
- #define BLKBSZSET  _IOW(0x12,113,size_t)
- #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
-+#define BLKSTARTTRACE _IOWR(0x12,115,struct blk_user_trace_setup)
-+#define BLKSTOPTRACE _IO(0x12,116)
- 
- #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
- #define FIBMAP	   _IO(0x00,1)	/* bmap access */
diff --git a/kernel/relayfs-read-update-2.6.13-rc6-mm2 b/kernel/relayfs-read-update-2.6.13-rc6-mm2
deleted file mode 100644
index b1bd088..0000000
--- a/kernel/relayfs-read-update-2.6.13-rc6-mm2
+++ /dev/null
@@ -1,413 +0,0 @@
-Hi,
-
-The current relayfs read implementation works fine, but was designed
-to be used mainly for 'draining' the buffer after a tracing run. It
-turns out that people really want to be able to read from the buffer
-during a live trace, for example the blktrace application submitted
-recently:
-
-http://marc.theaimsgroup.com/?l=linux-kernel&m=112480046405961&w=2
-
-Here's an improved read implementation for relayfs which allows for
-that.
-
-This version has been tested pretty thoroughly, using both the
-blktrace application and a new example I added to the relay-apps
-tarball called 'readtest' which is basically a unit test for the read
-functionality. All the tests I've come up with have passed and it
-looks pretty solid at this point. Here's a link to the test code:
-
-http://prdownloads.sourceforge.net/relayfs/relay-apps-0.8.tar.gz?download
-
-Andrew, please apply.
-
-Thanks,
-
-Tom
-
-
-Signed-off-by: Tom Zanussi
-
-diff -urpN -X dontdiff linux-2.6.13-rc6-mm2/Documentation/filesystems/relayfs.txt linux-2.6.13-rc6-mm2-cur/Documentation/filesystems/relayfs.txt
---- linux-2.6.13-rc6-mm2/Documentation/filesystems/relayfs.txt	2005-08-25 19:28:59.000000000 -0500
-+++ linux-2.6.13-rc6-mm2-cur/Documentation/filesystems/relayfs.txt	2005-08-25 17:07:48.000000000 -0500
-@@ -82,10 +82,15 @@ mmap()	results in channel buffer being
- 	memory space. Note that you can't do a partial mmap - you must
- 	map the entire file, which is NRBUF * SUBBUFSIZE.
- 
--read()	read the contents of a channel buffer. If there are active
--	channel writers, results may be unpredictable - users should
--	make sure that all logging to the channel has ended before
--	using read().
-+read()	read the contents of a channel buffer. The bytes read are
-+	'consumed' by the reader i.e. they won't be available again
-+	to subsequent reads. If the channel is being used in
-+	no-overwrite mode (the default), it can be read at any time
-+	even if there's an active kernel writer. If the channel is
-+	being used in overwrite mode and there are active channel
-+	writers, results may be unpredictable - users should make
-+	sure that all logging to the channel has ended before using
-+	read() with overwrite mode.
- 
- poll()	POLLIN/POLLRDNORM/POLLERR supported. User applications are
- 	notified when sub-buffer boundaries are crossed.
-@@ -256,8 +261,8 @@ consulted.
- 
- The default subbuf_start() implementation, used if the client doesn't
- define any callbacks, or doesn't define the subbuf_start() callback,
--implements the simplest possible 'overwrite' mode i.e. it does nothing
--but return 1.
-+implements the simplest possible 'no-overwrite' mode i.e. it does
-+nothing but return 0.
- 
- Header information can be reserved at the beginning of each sub-buffer
- by calling the subbuf_start_reserve() helper function from within the
-diff -urpN -X dontdiff linux-2.6.13-rc6-mm2/fs/relayfs/inode.c linux-2.6.13-rc6-mm2-cur/fs/relayfs/inode.c
---- linux-2.6.13-rc6-mm2/fs/relayfs/inode.c	2005-08-25 19:29:02.000000000 -0500
-+++ linux-2.6.13-rc6-mm2-cur/fs/relayfs/inode.c	2005-08-25 18:21:31.000000000 -0500
-@@ -295,101 +295,143 @@ static int relayfs_release(struct inode
- }
- 
- /**
-- * relayfs_read_start - find the first available byte to read
-- *
-- * If the read_pos is in the middle of padding, return the
-- * position of the first actually available byte, otherwise
-- * return the original value.
-+ * relayfs_read_consume - update the consumed count for the buffer
-  */
--static inline size_t relayfs_read_start(size_t read_pos,
--					size_t avail,
--					size_t start_subbuf,
--					struct rchan_buf *buf)
-+static void relayfs_read_consume(struct rchan_buf *buf,
-+				 size_t read_pos,
-+				 size_t bytes_consumed)
- {
--	size_t read_subbuf, adj_read_subbuf;
--	size_t padding, padding_start, padding_end;
- 	size_t subbuf_size = buf->chan->subbuf_size;
- 	size_t n_subbufs = buf->chan->n_subbufs;
-+	size_t read_subbuf;
- 
--	read_subbuf = read_pos / subbuf_size;
--	adj_read_subbuf = (read_subbuf + start_subbuf) % n_subbufs;
-+	if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
-+		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
-+		buf->bytes_consumed = 0;
-+	}
- 
--	if ((read_subbuf + 1) * subbuf_size <= avail) {
--		padding = buf->padding[adj_read_subbuf];
--		padding_start = (read_subbuf + 1) * subbuf_size - padding;
--		padding_end = (read_subbuf + 1) * subbuf_size;
--		if (read_pos >= padding_start && read_pos < padding_end) {
--			read_subbuf = (read_subbuf + 1) % n_subbufs;
--			read_pos = read_subbuf * subbuf_size;
-+	buf->bytes_consumed += bytes_consumed;
-+	read_subbuf = read_pos / buf->chan->subbuf_size;
-+	if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
-+		if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
-+		    (buf->offset == subbuf_size))
-+			return;
-+		relay_subbufs_consumed(buf->chan, buf->cpu, 1);
-+		buf->bytes_consumed = 0;
-+	}
-+}
-+
-+/**
-+ * relayfs_read_avail - boolean, are there unconsumed bytes available?
-+ */
-+static int relayfs_read_avail(struct rchan_buf *buf, size_t read_pos)
-+{
-+	size_t bytes_produced, bytes_consumed, write_offset;
-+	size_t subbuf_size = buf->chan->subbuf_size;
-+	size_t n_subbufs = buf->chan->n_subbufs;
-+	size_t produced = buf->subbufs_produced % n_subbufs;
-+	size_t consumed = buf->subbufs_consumed % n_subbufs;
-+
-+	write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
-+
-+	if (consumed > produced) {
-+		if ((produced > n_subbufs) &&
-+		    (produced + n_subbufs - consumed <= n_subbufs))
-+			produced += n_subbufs;
-+	} else if (consumed == produced) {
-+		if (buf->offset > subbuf_size) {
-+			produced += n_subbufs;
-+			if (buf->subbufs_produced == buf->subbufs_consumed)
-+				consumed += n_subbufs;
- 		}
- 	}
- 
--	return read_pos;
-+	if (buf->offset > subbuf_size)
-+		bytes_produced = (produced - 1) * subbuf_size + write_offset;
-+	else
-+		bytes_produced = produced * subbuf_size + write_offset;
-+	bytes_consumed = consumed * subbuf_size + buf->bytes_consumed;
-+
-+	if (bytes_produced == bytes_consumed)
-+		return 0;
-+
-+	relayfs_read_consume(buf, read_pos, 0);
-+
-+	return 1;
- }
- 
- /**
-- * relayfs_read_end - return the end of available bytes to read
-- *
-- * If the read_pos is in the middle of a full sub-buffer, return
-- * the padding-adjusted end of that sub-buffer, otherwise return
-- * the position after the last byte written to the buffer. At
-- * most, 1 sub-buffer can be read at a time.
-- *
-+ * relayfs_read_subbuf_avail - return bytes available in sub-buffer
-  */
--static inline size_t relayfs_read_end(size_t read_pos,
--				      size_t avail,
--				      size_t start_subbuf,
--				      struct rchan_buf *buf)
-+static size_t relayfs_read_subbuf_avail(size_t read_pos,
-+					struct rchan_buf *buf)
- {
--	size_t padding, read_endpos, buf_offset;
--	size_t read_subbuf, adj_read_subbuf;
-+	size_t padding, avail = 0;
-+	size_t read_subbuf, read_offset, write_subbuf, write_offset;
- 	size_t subbuf_size = buf->chan->subbuf_size;
--	size_t n_subbufs = buf->chan->n_subbufs;
- 
--	buf_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
-+	write_subbuf = (buf->data - buf->start) / subbuf_size;
-+	write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
- 	read_subbuf = read_pos / subbuf_size;
--	adj_read_subbuf = (read_subbuf + start_subbuf) % n_subbufs;
-+	read_offset = read_pos % subbuf_size;
-+	padding = buf->padding[read_subbuf];
- 
--	if ((read_subbuf + 1) * subbuf_size <= avail) {
--		padding = buf->padding[adj_read_subbuf];
--		read_endpos = (read_subbuf + 1) * subbuf_size - padding;
-+	if (read_subbuf == write_subbuf) {
-+		if (read_offset + padding < write_offset)
-+			avail = write_offset - (read_offset + padding);
- 	} else
--		read_endpos = read_subbuf * subbuf_size + buf_offset;
-+		avail = (subbuf_size - padding) - read_offset;
- 
--	return read_endpos;
-+	return avail;
- }
- 
- /**
-- * relayfs_read_avail - return total available along with buffer start
-- *
-- * Because buffers are circular, the 'beginning' of the buffer
-- * depends on where the buffer was last written. If the writer
-- * has cycled around the buffer, the beginning is defined to be
-- * the beginning of the sub-buffer following the last sub-buffer
-- * written to, otherwise it's the beginning of sub-buffer 0.
-+ * relayfs_read_start_pos - find the first available byte to read
-  *
-+ * If the read_pos is in the middle of padding, return the
-+ * position of the first actually available byte, otherwise
-+ * return the original value.
-  */
--static inline size_t relayfs_read_avail(struct rchan_buf *buf,
--					size_t *start_subbuf)
-+static size_t relayfs_read_start_pos(size_t read_pos,
-+				     struct rchan_buf *buf)
- {
--	size_t avail, complete_subbufs, cur_subbuf, buf_offset;
-+	size_t read_subbuf, padding, padding_start, padding_end;
- 	size_t subbuf_size = buf->chan->subbuf_size;
- 	size_t n_subbufs = buf->chan->n_subbufs;
-+
-+	read_subbuf = read_pos / subbuf_size;
-+	padding = buf->padding[read_subbuf];
-+	padding_start = (read_subbuf + 1) * subbuf_size - padding;
-+	padding_end = (read_subbuf + 1) * subbuf_size;
-+	if (read_pos >= padding_start && read_pos < padding_end) {
-+		read_subbuf = (read_subbuf + 1) % n_subbufs;
-+		read_pos = read_subbuf * subbuf_size;
-+	}
- 
--	buf_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
-+	return read_pos;
-+}
- 
--	if (buf->subbufs_produced >= n_subbufs) {
--		complete_subbufs = n_subbufs - 1;
--		cur_subbuf = (buf->data - buf->start) / subbuf_size;
--		*start_subbuf = (cur_subbuf + 1) % n_subbufs;
--	} else {
--		complete_subbufs = buf->subbufs_produced;
--		*start_subbuf = 0;
--	}
-+/**
-+ * relayfs_read_end_pos - return the new read position
-+ */
-+static size_t relayfs_read_end_pos(struct rchan_buf *buf,
-+				   size_t read_pos,
-+				   size_t count)
-+{
-+	size_t read_subbuf, padding, end_pos;
-+	size_t subbuf_size = buf->chan->subbuf_size;
-+	size_t n_subbufs = buf->chan->n_subbufs;
- 
--	avail = complete_subbufs * subbuf_size + buf_offset;
-+	read_subbuf = read_pos / subbuf_size;
-+	padding = buf->padding[read_subbuf];
-+	if (read_pos % subbuf_size + count + padding == subbuf_size)
-+		end_pos = (read_subbuf + 1) * subbuf_size;
-+	else
-+		end_pos = read_pos + count;
-+	if (end_pos >= subbuf_size * n_subbufs)
-+		end_pos = 0;
- 
--	return avail;
-+	return end_pos;
- }
- 
- /**
-@@ -401,13 +443,6 @@ static inline size_t relayfs_read_avail(
-  *
-  * Reads count bytes or the number of bytes available in the
-  * current sub-buffer being read, whichever is smaller.
-- *
-- * NOTE: The results of reading a relayfs file which is currently
-- * being written to are undefined. This is because the buffer is
-- * circular and an active writer in the kernel could be
-- * overwriting the data currently being read. Therefore read()
-- * is mainly useful for reading the contents of a buffer after
-- * logging has completed.
-  */
- static ssize_t relayfs_read(struct file *filp,
- 			    char __user *buffer,
-@@ -416,33 +451,30 @@
- {
- 	struct inode *inode = filp->f_dentry->d_inode;
- 	struct rchan_buf *buf = RELAYFS_I(inode)->buf;
--	size_t read_start, read_end, avail, start_subbuf;
--	size_t buf_size = buf->chan->subbuf_size * buf->chan->n_subbufs;
-+	size_t read_start, avail;
-+	ssize_t ret = 0;
- 	void *from;
- 
--	avail = relayfs_read_avail(buf, &start_subbuf);
--	if (*ppos >= avail)
--		return 0;
--
--	read_start = relayfs_read_start(*ppos, avail, start_subbuf, buf);
--	if (read_start == 0 && *ppos)
--		return 0;
--
--	read_end = relayfs_read_end(read_start, avail, start_subbuf, buf);
--	if (read_end == read_start)
--		return 0;
--
--	from = buf->start + start_subbuf * buf->chan->subbuf_size + read_start;
--	if (from >= buf->start + buf_size)
--		from -= buf_size;
--
--	count = min(count, read_end - read_start);
--	if (copy_to_user(buffer, from, count))
--		return -EFAULT;
--
--	*ppos = read_start + count;
--
--	return count;
-+	down(&inode->i_sem);
-+	if(!relayfs_read_avail(buf, *ppos))
-+		goto out;
-+
-+	read_start = relayfs_read_start_pos(*ppos, buf);
-+	avail = relayfs_read_subbuf_avail(read_start, buf);
-+	if (!avail)
-+		goto out;
-+
-+	from = buf->start + read_start;
-+	ret = count = min(count, avail);
-+	if (copy_to_user(buffer, from, count)) {
-+		ret = -EFAULT;
-+		goto out;
-+	}
-+	relayfs_read_consume(buf, read_start, count);
-+	*ppos = relayfs_read_end_pos(buf, read_start, count);
-+out:
-+	up(&inode->i_sem);
-+	return ret;
- }
- 
- /**
-@@ -481,6 +513,7 @@ struct file_operations relayfs_file_oper
- 	.poll = relayfs_poll,
- 	.mmap = relayfs_mmap,
- 	.read = relayfs_read,
-+	.llseek = no_llseek,
- 	.release = relayfs_release,
- };
- 
-diff -urpN -X dontdiff linux-2.6.13-rc6-mm2/fs/relayfs/relay.c linux-2.6.13-rc6-mm2-cur/fs/relayfs/relay.c
---- linux-2.6.13-rc6-mm2/fs/relayfs/relay.c	2005-08-25 19:29:02.000000000 -0500
-+++ linux-2.6.13-rc6-mm2-cur/fs/relayfs/relay.c	2005-08-25 21:12:29.000000000 -0500
-@@ -58,6 +58,9 @@ static int subbuf_start_default_callback
- 					  void *prev_subbuf,
- 					  size_t prev_padding)
- {
-+	if (relay_buf_full(buf))
-+		return 0;
-+
- 	return 1;
- }
- 
-@@ -120,6 +123,7 @@ static inline void __relay_reset(struct
- 
- 	buf->subbufs_produced = 0;
- 	buf->subbufs_consumed = 0;
-+	buf->bytes_consumed = 0;
- 	buf->finalized = 0;
- 	buf->data = buf->start;
- 	buf->offset = 0;
-@@ -262,6 +266,7 @@ struct rchan *relay_open(const char *bas
- 	for_each_online_cpu(i) {
- 		sprintf(tmpname, "%s%d", base_filename, i);
- 		chan->buf[i] = relay_open_buf(chan, tmpname, parent);
-+		chan->buf[i]->cpu = i;
- 		if (!chan->buf[i])
- 			goto free_bufs;
- 	}
-@@ -328,7 +333,7 @@ size_t relay_switch_subbuf(struct rchan_
- 	return length;
- 
- toobig:
--	printk(KERN_WARNING "relayfs: event too large (%u)\n", length);
-+	printk(KERN_WARNING "relayfs: event too large (%Zd)\n", length);
- 	WARN_ON(1);
- 	return 0;
- }
-diff -urpN -X dontdiff linux-2.6.13-rc6-mm2/include/linux/relayfs_fs.h linux-2.6.13-rc6-mm2-cur/include/linux/relayfs_fs.h
---- linux-2.6.13-rc6-mm2/include/linux/relayfs_fs.h	2005-08-25 19:29:03.000000000 -0500
-+++ linux-2.6.13-rc6-mm2-cur/include/linux/relayfs_fs.h	2005-08-24 00:16:37.000000000 -0500
-@@ -22,7 +22,7 @@
- /*
-  * Tracks changes to rchan_buf struct
-  */
--#define RELAYFS_CHANNEL_VERSION		4
-+#define RELAYFS_CHANNEL_VERSION		5
- 
- /*
-  * Per-cpu relay channel buffer
-@@ -44,6 +44,8 @@ struct rchan_buf
- 	unsigned int finalized;		/* buffer has been finalized */
- 	size_t *padding;		/* padding counts per sub-buffer */
- 	size_t prev_padding;		/* temporary variable */
-+	size_t bytes_consumed;		/* bytes consumed in cur read subbuf */
-+	unsigned int cpu;		/* this buf's cpu */
- } ____cacheline_aligned;
- 
- /*
- 
-
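
For context, the BLKSTARTTRACE/BLKSTOPTRACE ioctls added by the kernel patch above would be driven from userspace roughly as follows. This is a minimal sketch, not part of the commit: the device path, buffer sizing, and the relayfs channel location are assumptions, while the structure layout and ioctl numbers are copied from the blktrace.h and fs.h hunks.

```c
/*
 * Hypothetical userspace driver for the BLKSTARTTRACE/BLKSTOPTRACE
 * ioctls defined in the patch above. Build as a normal C program.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define BDEVNAME_SIZE	32	/* assumed to match the kernel's value */

/* Userspace mirror of struct blk_user_trace_setup from the patch */
struct blk_user_trace_setup {
	char name[BDEVNAME_SIZE];	/* output: relay channel base name */
	uint16_t act_mask;		/* input: 0 selects all categories */
	uint32_t buf_size;		/* input: size of each sub-buffer */
	uint32_t buf_nr;		/* input: number of sub-buffers */
};

/* Same ioctl numbers the patch adds to include/linux/fs.h */
#define BLKSTARTTRACE	_IOWR(0x12, 115, struct blk_user_trace_setup)
#define BLKSTOPTRACE	_IO(0x12, 116)

int main(int argc, char *argv[])
{
	/* Device path is an assumption; pass one on the command line. */
	const char *dev = argc > 1 ? argv[1] : "/dev/sda";
	struct blk_user_trace_setup buts;
	int fd;

	fd = open(dev, O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 128 * 1024;	/* 128 KiB per sub-buffer (guess) */
	buts.buf_nr = 4;		/* 4 sub-buffers per cpu (guess) */
	buts.act_mask = 0;		/* kernel turns 0 into "trace all" */

	/* Kernel fills in buts.name with the bdevname on success */
	if (ioctl(fd, BLKSTARTTRACE, &buts) < 0) {
		perror("BLKSTARTTRACE");
		close(fd);
		return 1;
	}

	/*
	 * Per-cpu trace data now appears under the relayfs mount as
	 * <name>0, <name>1, ... (mount point depends on the system);
	 * a reader consumes struct blk_io_trace records from there.
	 */
	printf("tracing %s, relay channel base name: %s\n", dev, buts.name);
	sleep(5);

	if (ioctl(fd, BLKSTOPTRACE) < 0)
		perror("BLKSTOPTRACE");

	close(fd);
	return 0;
}
```

Note how this lines up with the improved relayfs read() above: because the reworked implementation consumes bytes as they are read and works against a live writer in no-overwrite mode, a tool like blktrace can sit in a read loop on the per-cpu relay files for the whole duration of the trace instead of draining them only after BLKSTOPTRACE.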