#include <sys/param.h>
#include <sys/statfs.h>
#include <sys/poll.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include <ctype.h>
#include <getopt.h>
#include <errno.h>
-#include <assert.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <sys/sendfile.h>
#include "blktrace.h"
+#include "barrier.h"
static char blktrace_version[] = "0.99";
#define RELAYFS_TYPE 0xF0B4A981
-#define RING_INIT_NR (2)
-#define RING_MAX_NR (16UL)
-
-#define S_OPTS "d:a:A:r:o:kw:Vb:n:D:"
+#define S_OPTS "d:a:A:r:o:kw:Vb:n:D:lh:p:"
static struct option l_opts[] = {
{
.name = "dev",
.flag = NULL,
.val = 'D'
},
+ {
+ .name = "listen",
+ .has_arg = no_argument,
+ .flag = NULL,
+ .val = 'l'
+ },
+ {
+ .name = "host",
+ .has_arg = required_argument,
+ .flag = NULL,
+ .val = 'h'
+ },
+ {
+ .name = "port",
+ .has_arg = required_argument,
+ .flag = NULL,
+ .val = 'p'
+ },
{
.name = NULL,
}
};
+/*
+ * One relayfs sub-buffer worth of trace data, heap-copied for the
+ * pipe/network output paths.
+ */
+struct tip_subbuf {
+	void *buf;		/* malloc'ed copy of the trace data */
+	unsigned int len;	/* valid bytes in buf */
+	unsigned int max_len;	/* allocated size of buf */
+};
+
+#define FIFO_SIZE (1024) /* should be plenty big! */
+#define CL_SIZE (128) /* cache line, any bigger? */
+
+/*
+ * Single-producer/single-consumer ring of sub-buffers (trace thread
+ * queues at tail, writeout loop dequeues at head; ordering enforced
+ * with store_barrier()). Indices sit on separate cache lines to avoid
+ * false sharing between the two sides.
+ */
+struct tip_subbuf_fifo {
+	int tail __attribute__((aligned(CL_SIZE)));
+	int head __attribute__((aligned(CL_SIZE)));
+	struct tip_subbuf *q[FIFO_SIZE];
+};
+
struct thread_information {
int cpu;
pthread_t thread;
int fd;
void *fd_buf;
- unsigned long fd_off;
- unsigned long fd_size;
- unsigned long fd_max_size;
char fn[MAXPATHLEN + 64];
- pthread_mutex_t *fd_lock;
FILE *ofile;
char *ofile_buffer;
+ int ofile_stdout;
+ int ofile_mmap;
unsigned long events_processed;
+ unsigned long long data_read;
struct device_information *device;
+
+ int exited;
+
+ /*
+ * piped fifo buffers
+ */
+ struct tip_subbuf_fifo fifo;
+ struct tip_subbuf *leftover_ts;
+
+ /*
+ * mmap controlled output files
+ */
+ unsigned long long fs_size;
+ unsigned long long fs_max_size;
+ unsigned long fs_off;
+ void *fs_buf;
+ unsigned long fs_buf_len;
};
struct device_information {
static int kill_running_trace;
static unsigned long buf_size = BUF_SIZE;
static unsigned long buf_nr = BUF_NR;
+static unsigned int page_size;
#define is_done() (*(volatile int *)(&done))
static volatile int done;
#define is_stat_shown() (*(volatile int *)(&stat_shown))
static volatile int stat_shown;
-static pthread_mutex_t stdout_mutex = PTHREAD_MUTEX_INITIALIZER;
+int data_is_native = -1;
static void exit_trace(int status);
for (__i = 0, __d = device_information; __i < __e; __i++, __d++)
#define for_each_dip(__d, __i) __for_each_dip(__d, __i, ndevs)
-#define for_each_tip(__d, __t, __i) \
- for (__i = 0, __t = (__d)->threads; __i < ncpus; __i++, __t++)
+#define for_each_tip(__d, __t, __j) \
+ for (__j = 0, __t = (__d)->threads; __j < ncpus; __j++, __t++)
+
+/*
+ * networking stuff follows. we include a magic number so we know whether
+ * to endianness convert or not
+ */
+struct blktrace_net_hdr {
+ u32 magic; /* same as trace magic */
+ char buts_name[32]; /* trace name */
+ u32 cpu; /* for which cpu */
+ u32 max_cpus;
+ u32 len; /* length of following trace data */
+};
+
+#define TRACE_NET_PORT (8462)
+
+enum {
+ Net_none = 0,
+ Net_server,
+ Net_client,
+};
+
+/*
+ * network cmd line params
+ */
+static char hostname[MAXHOSTNAMELEN];
+static int net_port = TRACE_NET_PORT;
+static int net_mode = 0;
+
+static int net_in_fd = -1;
+static int net_out_fd = -1;
+
+static void handle_sigint(__attribute__((__unused__)) int sig)
+{
+	/* just flag completion; loops poll is_done() and wind down */
+	done = 1;
+}
static int get_dropped_count(const char *buts_name)
{
buts.buf_nr = buf_nr;
buts.act_mask = act_mask;
- if (ioctl(dip->fd, BLKSTARTTRACE, &buts) < 0) {
- perror("BLKSTARTTRACE");
+ if (ioctl(dip->fd, BLKTRACESETUP, &buts) < 0) {
+ perror("BLKTRACESETUP");
+ return 1;
+ }
+
+ if (ioctl(dip->fd, BLKTRACESTART) < 0) {
+ perror("BLKTRACESTART");
return 1;
}
if (dip_tracing(dip) || kill_running_trace) {
dip_set_tracing(dip, 0);
- if (ioctl(dip->fd, BLKSTOPTRACE) < 0)
- perror("BLKSTOPTRACE");
+ if (ioctl(dip->fd, BLKTRACESTOP) < 0)
+ perror("BLKTRACESTOP");
+ if (ioctl(dip->fd, BLKTRACETEARDOWN) < 0)
+ perror("BLKTRACETEARDOWN");
close(dip->fd);
dip->fd = -1;
{
struct pollfd pfd = { .fd = tip->fd, .events = POLLIN };
- poll(&pfd, 1, 10);
+ do {
+ poll(&pfd, 1, 100);
+ if (pfd.revents & POLLIN)
+ break;
+ if (tip->ofile_stdout)
+ break;
+ } while (!is_done());
}
-static int __read_data(struct thread_information *tip, void *buf, int len,
- int block)
+static int read_data_file(struct thread_information *tip, void *buf, int len)
{
int ret = 0;
- while (!is_done()) {
- ret = read(tip->fd, buf, len);
- if (ret > 0)
- break;
- else if (!ret) {
- if (!block)
- break;
+ do {
+ wait_for_data(tip);
- wait_for_data(tip);
- } else {
+ ret = read(tip->fd, buf, len);
+ if (!ret)
+ continue;
+ else if (ret > 0)
+ return ret;
+ else {
if (errno != EAGAIN) {
perror(tip->fn);
fprintf(stderr,"Thread %d failed read of %s\n",
tip->cpu, tip->fn);
break;
}
- if (!block) {
- ret = 0;
- break;
- }
-
- wait_for_data(tip);
+ continue;
}
- }
+ } while (!is_done());
return ret;
-}
-
-#define can_grow_ring(tip) ((tip)->fd_max_size < RING_MAX_NR * buf_size * buf_nr)
-
-static int resize_ringbuffer(struct thread_information *tip)
-{
- if (!can_grow_ring(tip))
- return 1;
-
- tip->fd_buf = realloc(tip->fd_buf, 2 * tip->fd_max_size);
-
- /*
- * if the ring currently wraps, copy range over
- */
- if (tip->fd_off + tip->fd_size > tip->fd_max_size) {
- unsigned long wrap_size = tip->fd_size - (tip->fd_max_size - tip->fd_off);
- memmove(tip->fd_buf + tip->fd_max_size, tip->fd_buf, wrap_size);
- }
- tip->fd_max_size <<= 1;
- return 0;
}
-static int __refill_ringbuffer(struct thread_information *tip, int len,
- int block)
+static int read_data_net(struct thread_information *tip, void *buf, int len)
{
- unsigned long off;
- int ret;
-
- off = (tip->fd_size + tip->fd_off) & (tip->fd_max_size - 1);
- if (off + len > tip->fd_max_size)
- len = tip->fd_max_size - off;
+ unsigned int bytes_left = len;
+ int ret = 0;
- assert(len > 0);
+ do {
+ ret = recv(net_in_fd, buf, bytes_left, MSG_WAITALL);
- ret = __read_data(tip, tip->fd_buf + off, len, block);
- if (ret < 0)
- return -1;
+ if (!ret)
+ continue;
+ else if (ret < 0) {
+ if (errno != EAGAIN) {
+ perror(tip->fn);
+ fprintf(stderr, "server: failed read\n");
+ return 0;
+ }
+ continue;
+ } else {
+ buf += ret;
+ bytes_left -= ret;
+ }
+ } while (!is_done() && bytes_left);
- tip->fd_size += ret;
- return ret;
+ return len;
}
-/*
- * keep filling ring until we get a short read
- */
-static void refill_ringbuffer(struct thread_information *tip, int block)
+static int read_data(struct thread_information *tip, void *buf, int len)
{
- int len = buf_size;
- int ret;
-
- do {
- if (len + tip->fd_size > tip->fd_max_size)
- resize_ringbuffer(tip);
+ if (net_mode == Net_server)
+ return read_data_net(tip, buf, len);
- ret = __refill_ringbuffer(tip, len, block);
- } while ((ret = len) && !is_done());
+ return read_data_file(tip, buf, len);
}
-static int read_data(struct thread_information *tip, void *buf,
- unsigned int len)
+static inline struct tip_subbuf *
+subbuf_fifo_dequeue(struct thread_information *tip)
{
- unsigned int start_size, end_size;
+ const int head = tip->fifo.head;
+ const int next = (head + 1) & (FIFO_SIZE - 1);
- refill_ringbuffer(tip, len > tip->fd_size);
+ if (head != tip->fifo.tail) {
+ struct tip_subbuf *ts = tip->fifo.q[head];
- if (len > tip->fd_size)
- return -1;
-
- /*
- * see if we wrap the ring
- */
- start_size = len;
- end_size = 0;
- if (len > (tip->fd_max_size - tip->fd_off)) {
- start_size = tip->fd_max_size - tip->fd_off;
- end_size = len - start_size;
+ store_barrier();
+ tip->fifo.head = next;
+ return ts;
}
- memcpy(buf, tip->fd_buf + tip->fd_off, start_size);
- if (end_size)
- memcpy(buf + start_size, tip->fd_buf, end_size);
-
- tip->fd_off = (tip->fd_off + len) & (tip->fd_max_size - 1);
- tip->fd_size -= len;
- return 0;
+ return NULL;
}
-static int write_data(FILE *file, void *buf, unsigned int buf_len)
+static inline int subbuf_fifo_queue(struct thread_information *tip,
+ struct tip_subbuf *ts)
{
- int ret, bytes_left;
- char *p = buf;
-
- bytes_left = buf_len;
- while (bytes_left > 0) {
- ret = fwrite(p, bytes_left, 1, file);
- if (ret == 1)
- break;
+ const int tail = tip->fifo.tail;
+ const int next = (tail + 1) & (FIFO_SIZE - 1);
- if (ret < 0) {
- perror("write");
- return 1;
- }
+ if (next != tip->fifo.head) {
+ tip->fifo.q[tail] = ts;
+ store_barrier();
+ tip->fifo.tail = next;
+ return 0;
}
- return 0;
-}
-
-static void *extract_data(struct thread_information *tip, int nb)
-{
- unsigned char *buf;
-
- buf = malloc(nb);
- if (!read_data(tip, buf, nb))
- return buf;
-
- free(buf);
- return NULL;
+ fprintf(stderr, "fifo too small!\n");
+ return 1;
}
/*
- * trace may start inside 'bit' or may need to be gotten further on
+ * For file output, truncate and mmap the file appropriately
*/
-static int get_event_slow(struct thread_information *tip,
- struct blk_io_trace *bit)
+static int mmap_subbuf(struct thread_information *tip, unsigned int maxlen)
{
- const int inc = sizeof(__u32);
- struct blk_io_trace foo;
- unsigned int offset;
- void *p;
-
- /*
- * check if trace is inside
- */
- offset = 0;
- p = bit;
- while (offset < sizeof(*bit)) {
- p += inc;
- offset += inc;
-
- memcpy(&foo, p, inc);
-
- if (CHECK_MAGIC(&foo))
- break;
- }
+ int ofd = fileno(tip->ofile);
+ int ret;
/*
- * part trace found inside, read the rest
+ * extend file, if we have to. use chunks of 16 subbuffers.
*/
- if (offset < sizeof(*bit)) {
- int good_bytes = sizeof(*bit) - offset;
+ if (tip->fs_off + buf_size > tip->fs_buf_len) {
+ if (tip->fs_buf) {
+ munlock(tip->fs_buf, tip->fs_buf_len);
+ munmap(tip->fs_buf, tip->fs_buf_len);
+ tip->fs_buf = NULL;
+ }
- memmove(bit, p, good_bytes);
- p = (void *) bit + good_bytes;
+ tip->fs_off = tip->fs_size & (page_size - 1);
+ tip->fs_buf_len = (16 * buf_size) - tip->fs_off;
+ tip->fs_max_size += tip->fs_buf_len;
- return read_data(tip, p, offset);
- }
+ if (ftruncate(ofd, tip->fs_max_size) < 0) {
+ perror("ftruncate");
+ return -1;
+ }
- /*
- * nothing found, keep looking for start of trace
- */
- do {
- if (read_data(tip, bit, sizeof(bit->magic)))
+ tip->fs_buf = mmap(NULL, tip->fs_buf_len, PROT_WRITE,
+ MAP_SHARED, ofd, tip->fs_size - tip->fs_off);
+ if (tip->fs_buf == MAP_FAILED) {
+ perror("mmap");
return -1;
- } while (!CHECK_MAGIC(bit));
+ }
+ mlock(tip->fs_buf, tip->fs_buf_len);
+ }
- /*
- * now get the rest of it
- */
- p = &bit->sequence;
- if (read_data(tip, p, sizeof(*bit) - inc))
- return -1;
+ ret = read_data(tip, tip->fs_buf + tip->fs_off, maxlen);
+ if (ret >= 0) {
+ tip->data_read += ret;
+ tip->fs_size += ret;
+ tip->fs_off += ret;
+ return 0;
+ }
- return 0;
+ return -1;
}
/*
- * Sometimes relayfs screws us a little, if an event crosses a sub buffer
- * boundary. So keep looking forward in the trace data until an event
- * is found
+ * Use the copy approach for pipes and network
*/
-static int get_event(struct thread_information *tip, struct blk_io_trace *bit)
+static int get_subbuf(struct thread_information *tip)
{
- /*
- * optimize for the common fast case, a full trace read that
- * succeeds
- */
- if (read_data(tip, bit, sizeof(*bit)))
- return -1;
+ struct tip_subbuf *ts = malloc(sizeof(*ts));
+ int ret;
- if (CHECK_MAGIC(bit))
- return 0;
+ ts->buf = malloc(buf_size);
+ ts->max_len = buf_size;
- /*
- * ok that didn't work, the event may start somewhere inside the
- * trace itself
- */
- return get_event_slow(tip, bit);
-}
-
-static inline void tip_fd_unlock(struct thread_information *tip)
-{
- if (tip->fd_lock)
- pthread_mutex_unlock(tip->fd_lock);
-}
+ ret = read_data(tip, ts->buf, ts->max_len);
+ if (ret > 0) {
+ ts->len = ret;
+ return subbuf_fifo_queue(tip, ts);
+ }
-static inline void tip_fd_lock(struct thread_information *tip)
-{
- if (tip->fd_lock)
- pthread_mutex_lock(tip->fd_lock);
+ return ret;
}
static void close_thread(struct thread_information *tip)
tip->fd_buf = NULL;
}
-static void *extract(void *arg)
+static void tip_ftrunc_final(struct thread_information *tip)
+{
+	/*
+	 * truncate to right size and cleanup mmap
+	 */
+	if (tip->ofile_mmap) {
+		int ofd = fileno(tip->ofile);
+
+		if (tip->fs_buf)
+			munmap(tip->fs_buf, tip->fs_buf_len);
+
+		/* drop the slack left by mmap_subbuf's chunked ftruncate */
+		ftruncate(ofd, tip->fs_size);
+	}
+}
+
+static void *thread_main(void *arg)
{
struct thread_information *tip = arg;
- int pdu_len;
- char *pdu_data;
- struct blk_io_trace t;
pid_t pid = getpid();
cpu_set_t cpu_mask;
snprintf(tip->fn, sizeof(tip->fn), "%s/block/%s/trace%d",
relay_path, tip->device->buts_name, tip->cpu);
- tip->fd = open(tip->fn, O_RDONLY | O_NONBLOCK);
+ tip->fd = open(tip->fn, O_RDONLY);
if (tip->fd < 0) {
perror(tip->fn);
fprintf(stderr,"Thread %d failed open of %s\n", tip->cpu,
exit_trace(1);
}
- /*
- * start with a ringbuffer that is twice the size of the kernel side
- */
- tip->fd_max_size = buf_size * buf_nr * RING_INIT_NR;
- tip->fd_buf = malloc(tip->fd_max_size);
- tip->fd_off = 0;
- tip->fd_size = 0;
+ while (!is_done()) {
+ if (tip->ofile_mmap && net_mode != Net_client) {
+ if (mmap_subbuf(tip, buf_size))
+ break;
+ } else {
+ if (get_subbuf(tip))
+ break;
+ }
+ }
+
+ tip_ftrunc_final(tip);
+ tip->exited = 1;
+ return NULL;
+}
+
+/*
+ * Push buf_len bytes of buf out over fd, looping because send() may
+ * accept fewer bytes than requested. Returns 0 on success, 1 on error.
+ */
+static int write_data_net(int fd, void *buf, unsigned int buf_len)
+{
+	void *p = buf;
+	unsigned int remaining = buf_len;
+
+	while (remaining) {
+		int sent = send(fd, p, remaining, 0);
+
+		if (sent < 0) {
+			perror("send");
+			return 1;
+		}
+
+		p += sent;
+		remaining -= sent;
+	}
+
+	return 0;
+}
+
+/*
+ * Ship one sub-buffer to the server: a blktrace_net_hdr first so the
+ * far side can demux by trace name/cpu, then the raw trace data.
+ * Returns 0 on success, 1 on send failure. The sub-buffer is consumed
+ * either way.
+ */
+static int flush_subbuf_net(struct thread_information *tip,
+			    struct tip_subbuf *ts)
+{
+	struct blktrace_net_hdr hdr;
+	int ret = 0;
+
+	hdr.magic = BLK_IO_TRACE_MAGIC;
+	strcpy(hdr.buts_name, tip->device->buts_name);
+	hdr.cpu = tip->cpu;
+	hdr.max_cpus = ncpus;
+	hdr.len = ts->len;
+
+	if (write_data_net(net_out_fd, &hdr, sizeof(hdr)) ||
+	    write_data_net(net_out_fd, ts->buf, ts->len))
+		ret = 1;
+
+	/*
+	 * free on the error paths too - the old code leaked the
+	 * sub-buffer whenever the connection broke mid-run
+	 */
+	free(ts->buf);
+	free(ts);
+	return ret;
+}
+
+static int write_data(struct thread_information *tip, void *buf,
+ unsigned int buf_len)
+{
+ int ret;
+
+ if (!buf_len)
+ return 0;
- pdu_data = NULL;
while (1) {
- if (get_event(tip, &t))
+ ret = fwrite(buf, buf_len, 1, tip->ofile);
+ if (ret == 1)
break;
- if (verify_trace(&t))
- break;
+ if (ret < 0) {
+ perror("write");
+ return 1;
+ }
+ }
- pdu_len = t.pdu_len;
+ if (tip->ofile_stdout)
+ fflush(tip->ofile);
- trace_to_be(&t);
+ return 0;
+}
- if (pdu_len) {
- pdu_data = extract_data(tip, pdu_len);
- if (!pdu_data)
- break;
+static int flush_subbuf_file(struct thread_information *tip,
+ struct tip_subbuf *ts)
+{
+ unsigned int offset = 0;
+ struct blk_io_trace *t;
+ int pdu_len, events = 0;
+
+ /*
+ * surplus from last run
+ */
+ if (tip->leftover_ts) {
+ struct tip_subbuf *prev_ts = tip->leftover_ts;
+
+ if (prev_ts->len + ts->len > prev_ts->max_len) {
+ prev_ts->max_len += ts->len;
+ prev_ts->buf = realloc(prev_ts->buf, prev_ts->max_len);
}
- /*
- * now we have both trace and payload, get a lock on the
- * output descriptor and send it off
- */
- tip_fd_lock(tip);
+ memcpy(prev_ts->buf + prev_ts->len, ts->buf, ts->len);
+ prev_ts->len += ts->len;
- if (write_data(tip->ofile, &t, sizeof(t))) {
- tip_fd_unlock(tip);
- break;
+ free(ts->buf);
+ free(ts);
+
+ ts = prev_ts;
+ tip->leftover_ts = NULL;
+ }
+
+ while (offset + sizeof(*t) <= ts->len) {
+ t = ts->buf + offset;
+
+ if (verify_trace(t)) {
+ write_data(tip, ts->buf, offset);
+ return -1;
}
- if (pdu_data && write_data(tip->ofile, pdu_data, pdu_len)) {
- tip_fd_unlock(tip);
+ pdu_len = t->pdu_len;
+
+ if (offset + sizeof(*t) + pdu_len > ts->len)
break;
+
+ offset += sizeof(*t) + pdu_len;
+ tip->events_processed++;
+ tip->data_read += sizeof(*t) + pdu_len;
+ events++;
+ }
+
+ if (write_data(tip, ts->buf, offset))
+ return -1;
+
+ /*
+ * leftover bytes, save them for next time
+ */
+ if (offset != ts->len) {
+ tip->leftover_ts = ts;
+ ts->len -= offset;
+ memmove(ts->buf, ts->buf + offset, ts->len);
+ } else {
+ free(ts->buf);
+ free(ts);
+ }
+
+ return events;
+}
+
+/*
+ * Drain one queued sub-buffer for this thread, routing it to the
+ * network (client mode) or the local output file. Returns the event
+ * count/status from the flush routine, 0 if the fifo was empty.
+ */
+static int write_tip_events(struct thread_information *tip)
+{
+	struct tip_subbuf *ts = subbuf_fifo_dequeue(tip);
+
+	if (!ts)
+		return 0;
+
+	if (net_mode == Net_client)
+		return flush_subbuf_net(tip, ts);
+
+	return flush_subbuf_file(tip, ts);
+}
+
+/*
+ * scans the tips we know and writes out the subbuffers we accumulate
+ */
+static void get_and_write_events(void)
+{
+ struct device_information *dip;
+ struct thread_information *tip;
+ int i, j, events, ret, tips_running;
+
+ while (!is_done()) {
+ events = 0;
+
+ for_each_dip(dip, i) {
+ for_each_tip(dip, tip, j) {
+ ret = write_tip_events(tip);
+ if (ret > 0)
+ events += ret;
+ }
}
- tip_fd_unlock(tip);
+ if (!events)
+ usleep(10);
+ }
- if (pdu_data) {
- free(pdu_data);
- pdu_data = NULL;
+ /*
+ * reap stored events
+ */
+ do {
+ events = 0;
+ tips_running = 0;
+ for_each_dip(dip, i) {
+ for_each_tip(dip, tip, j) {
+ ret = write_tip_events(tip);
+ if (ret > 0)
+ events += ret;
+ tips_running += !tip->exited;
+ }
}
+ usleep(10);
+ } while (events || tips_running);
+}
- tip->events_processed++;
+static void wait_for_threads(void)
+{
+	/*
+	 * for piped or network output, poll and fetch data for writeout.
+	 * for files, we just wait around for trace threads to exit
+	 */
+	if ((output_name && !strcmp(output_name, "-")) ||
+	    net_mode == Net_client)
+		get_and_write_events();
+	else {
+		struct device_information *dip;
+		struct thread_information *tip;
+		int i, j, tips_running;
+
+		/* mmap path: threads write their own files, watch exited flags */
+		do {
+			tips_running = 0;
+			usleep(1000);
+
+			for_each_dip(dip, i)
+				for_each_tip(dip, tip, j)
+				tips_running += !tip->exited;
+		} while (tips_running);
	}
+}
- close_thread(tip);
- return NULL;
+/*
+ * Build the per-cpu output file name: [output_dir/](name).blktrace.N,
+ * where name is -o's argument if given, else the kernel buts_name.
+ * NOTE(review): callers pass fixed-size buffers (char op[64]) and this
+ * uses unbounded sprintf - a long -o/-D argument can overflow; consider
+ * passing the destination size and switching to snprintf.
+ */
+static void fill_ofname(char *dst, char *buts_name, int cpu)
+{
+	int len = 0;
+
+	if (output_dir)
+		len = sprintf(dst, "%s/", output_dir);
+
+	if (output_name)
+		sprintf(dst + len, "%s.blktrace.%d", output_name, cpu);
+	else
+		sprintf(dst + len, "%s.blktrace.%d", buts_name, cpu);
+}
static int start_threads(struct device_information *dip)
{
struct thread_information *tip;
- char op[64];
int j, pipeline = output_name && !strcmp(output_name, "-");
- int len, mode;
+ int mode, vbuf_size;
+ char op[64];
for_each_tip(dip, tip, j) {
tip->cpu = j;
tip->device = dip;
- tip->fd_lock = NULL;
tip->events_processed = 0;
+ memset(&tip->fifo, 0, sizeof(tip->fifo));
+ tip->leftover_ts = NULL;
if (pipeline) {
tip->ofile = fdopen(STDOUT_FILENO, "w");
- tip->fd_lock = &stdout_mutex;
+ tip->ofile_stdout = 1;
+ tip->ofile_mmap = 0;
mode = _IOLBF;
- buf_size = 512;
+ vbuf_size = 512;
} else {
- len = 0;
-
- if (output_dir)
- len = sprintf(op, "%s/", output_dir);
-
- if (output_name) {
- sprintf(op + len, "%s.blktrace.%d", output_name,
- tip->cpu);
- } else {
- sprintf(op + len, "%s.blktrace.%d",
- dip->buts_name, tip->cpu);
- }
- tip->ofile = fopen(op, "w");
+ fill_ofname(op, dip->buts_name, tip->cpu);
+ tip->ofile = fopen(op, "w+");
+ tip->ofile_stdout = 0;
+ tip->ofile_mmap = 1;
mode = _IOFBF;
- buf_size = OFILE_BUF;
+ vbuf_size = OFILE_BUF;
}
if (tip->ofile == NULL) {
return 1;
}
- tip->ofile_buffer = malloc(buf_size);
- if (setvbuf(tip->ofile, tip->ofile_buffer, mode, buf_size)) {
+ tip->ofile_buffer = malloc(vbuf_size);
+ if (setvbuf(tip->ofile, tip->ofile_buffer, mode, vbuf_size)) {
perror("setvbuf");
close_thread(tip);
return 1;
}
- if (pthread_create(&tip->thread, NULL, extract, tip)) {
+ if (pthread_create(&tip->thread, NULL, thread_main, tip)) {
perror("pthread_create");
close_thread(tip);
return 1;
unsigned long ret;
int i;
- for_each_tip(dip, tip, i)
+ for_each_tip(dip, tip, i) {
(void) pthread_join(tip->thread, (void *) &ret);
+ close_thread(tip);
+ }
}
static void stop_all_threads(void)
static void show_stats(void)
{
- int i, j, no_stdout = 0;
struct device_information *dip;
struct thread_information *tip;
- unsigned long long events_processed;
+ unsigned long long events_processed, data_read;
unsigned long total_drops;
+ int i, j, no_stdout = 0;
if (is_stat_shown())
return;
- stat_shown = 1;
-
if (output_name && !strcmp(output_name, "-"))
no_stdout = 1;
+ stat_shown = 1;
+
total_drops = 0;
for_each_dip(dip, i) {
if (!no_stdout)
printf("Device: %s\n", dip->path);
events_processed = 0;
+ data_read = 0;
for_each_tip(dip, tip, j) {
if (!no_stdout)
- printf(" CPU%3d: %20ld events\n",
- tip->cpu, tip->events_processed);
+ printf(" CPU%3d: %20lu events, %8llu KiB data\n",
+ tip->cpu, tip->events_processed,
+ tip->data_read >> 10);
events_processed += tip->events_processed;
+ data_read += tip->data_read;
}
total_drops += dip->drop_count;
if (!no_stdout)
- printf(" Total: %20lld events (dropped %lu)\n",
- events_processed, dip->drop_count);
+ printf(" Total: %20llu events (dropped %lu), %8llu KiB data\n",
+ events_processed, dip->drop_count,
+ data_read >> 10);
}
if (total_drops)
fprintf(stderr, "You have dropped events, consider using a larger buffer size (-b)\n");
}
+/*
+ * Find (or create) the server-side device entry for a trace name
+ * received over the wire. On first sight of a name, allocates the
+ * device slot and its per-cpu threads, and opens all output files.
+ * Returns NULL if an output file cannot be opened.
+ */
+static struct device_information *net_get_dip(char *buts_name)
+{
+	struct device_information *dip;
+	int i;
+
+	for (i = 0; i < ndevs; i++) {
+		dip = &device_information[i];
+
+		if (!strcmp(dip->buts_name, buts_name))
+			return dip;
+	}
+
+	device_information = realloc(device_information, (ndevs + 1) * sizeof(*dip));
+	dip = &device_information[ndevs];
+	/*
+	 * realloc leaves the new slot uninitialized - clear it so fields
+	 * we never set here (e.g. drop_count, read by show_stats) start
+	 * from a known zero state
+	 */
+	memset(dip, 0, sizeof(*dip));
+	strcpy(dip->buts_name, buts_name);
+	ndevs++;
+	dip->threads = malloc(ncpus * sizeof(struct thread_information));
+	memset(dip->threads, 0, ncpus * sizeof(struct thread_information));
+
+	/*
+	 * open all files
+	 */
+	for (i = 0; i < ncpus; i++) {
+		struct thread_information *tip = &dip->threads[i];
+		char op[64];
+
+		tip->cpu = i;
+		tip->ofile_stdout = 0;
+		tip->ofile_mmap = 1;
+		tip->device = dip;
+
+		fill_ofname(op, dip->buts_name, tip->cpu);
+
+		tip->ofile = fopen(op, "w+");
+		if (!tip->ofile) {
+			perror("fopen");
+			return NULL;
+		}
+	}
+
+	return dip;
+}
+
+/*
+ * Map a received net header to its per-cpu thread slot, creating the
+ * device entry on demand. Returns NULL if the device entry could not
+ * be set up.
+ */
+static struct thread_information *net_get_tip(struct blktrace_net_hdr *bnh)
+{
+	struct device_information *dip;
+
+	/* the client dictates how many per-device thread slots exist */
+	ncpus = bnh->max_cpus;
+
+	dip = net_get_dip(bnh->buts_name);
+	if (!dip)
+		return NULL;	/* old code dereferenced NULL here on open failure */
+
+	/*
+	 * NOTE(review): bnh->cpu comes off the wire - this assumes the
+	 * client sends cpu < max_cpus; an out-of-range value would index
+	 * past the threads array.
+	 */
+	return &dip->threads[bnh->cpu];
+}
+
+/*
+ * Read the fixed-size net header from the client. The socket is
+ * flipped to non-blocking for the duration so we can honor is_done()
+ * (signal) while waiting. Returns 0 on success, non-zero on error or
+ * if interrupted before the full header arrived.
+ */
+static int net_get_header(struct blktrace_net_hdr *bnh)
+{
+	int fl = fcntl(net_in_fd, F_GETFL);
+	int bytes_left, ret;
+	void *p = bnh;
+
+	fcntl(net_in_fd, F_SETFL, fl | O_NONBLOCK);
+	bytes_left = sizeof(*bnh);
+	while (bytes_left && !is_done()) {
+		ret = recv(net_in_fd, p, bytes_left, MSG_WAITALL);
+		if (ret < 0) {
+			if (errno != EAGAIN) {
+				perror("recv header");
+				return 1;
+			}
+			usleep(100);
+			continue;
+		} else if (!ret) {
+			/* nothing buffered yet (or peer gone); retry until done */
+			usleep(100);
+			continue;
+		} else {
+			p += ret;
+			bytes_left -= ret;
+		}
+	}
+	fcntl(net_in_fd, F_SETFL, fl & ~O_NONBLOCK);
+
+	/*
+	 * the old code returned success even when interrupted mid-header,
+	 * handing the caller a partially filled bnh
+	 */
+	return bytes_left != 0;
+}
+
+/*
+ * One server iteration: receive a sub-buffer header, byte-swap it if
+ * the client has the other endianness, then pull the payload straight
+ * into the mmap'ed output file. Returns non-zero to stop the server.
+ */
+static int net_server_loop(void)
+{
+	struct thread_information *tip;
+	struct blktrace_net_hdr bnh;
+
+	if (net_get_header(&bnh))
+		return 1;
+
+	/* first header's magic decides endianness for the whole session */
+	if (data_is_native == -1 && check_data_endianness(bnh.magic)) {
+		fprintf(stderr, "server: received data is bad\n");
+		return 1;
+	}
+
+	if (!data_is_native) {
+		bnh.cpu = be32_to_cpu(bnh.cpu);
+		bnh.len = be32_to_cpu(bnh.len);
+		/*
+		 * max_cpus was left unconverted before, so a foreign-endian
+		 * client made net_get_tip size the threads array with a
+		 * byte-swapped (huge) cpu count
+		 */
+		bnh.max_cpus = be32_to_cpu(bnh.max_cpus);
+	}
+
+	tip = net_get_tip(&bnh);
+	if (!tip)
+		return 1;
+
+	if (mmap_subbuf(tip, bnh.len))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Start here when we are in server mode - just fetch data from the network
+ * and dump to files
+ */
+static int net_server(void)
+{
+	struct sockaddr_in addr;
+	socklen_t socklen;
+	int fd, opt, i, j;
+
+	fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (fd < 0) {
+		perror("server: socket");
+		return 1;
+	}
+
+	/* allow quick restart on the same port */
+	opt = 1;
+	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) < 0) {
+		perror("setsockopt");
+		return 1;
+	}
+
+	memset(&addr, 0, sizeof(addr));
+	addr.sin_family = AF_INET;
+	addr.sin_addr.s_addr = htonl(INADDR_ANY);
+	addr.sin_port = htons(net_port);
+
+	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
+		perror("bind");
+		return 1;
+	}
+
+	/* single-client server: backlog of one */
+	if (listen(fd, 1) < 0) {
+		perror("listen");
+		return 1;
+	}
+
+	printf("blktrace: waiting for incoming connection...\n");
+
+	socklen = sizeof(addr);
+	net_in_fd = accept(fd, (struct sockaddr *) &addr, &socklen);
+	if (net_in_fd < 0) {
+		perror("accept");
+		return 1;
+	}
+
+	/* only trap signals once we are actually serving a client */
+	signal(SIGINT, handle_sigint);
+	signal(SIGHUP, handle_sigint);
+	signal(SIGTERM, handle_sigint);
+	signal(SIGALRM, handle_sigint);
+
+	printf("blktrace: connected!\n");
+
+	while (!is_done()) {
+		if (net_server_loop())
+			break;
+	}
+
+	/* trim every received trace file down to its real size */
+	for (i = 0; i < ndevs; i++) {
+		struct device_information *dip = &device_information[i];
+
+		for (j = 0; j < ncpus; j++)
+			tip_ftrunc_final(&dip->threads[j]);
+	}
+
+	return 0;
+}
+
+/*
+ * Setup outgoing network connection where we will transmit data
+ */
+/*
+ * Connect to the remote blktrace server named by -h/-p; on success
+ * stores the connected socket in net_out_fd. Returns 0 on success,
+ * 1 on any failure (socket closed on the error paths).
+ */
+static int net_setup_client(void)
+{
+	struct sockaddr_in addr;
+	int fd;
+
+	fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (fd < 0) {
+		perror("client: socket");
+		return 1;
+	}
+
+	memset(&addr, 0, sizeof(addr));
+	addr.sin_family = AF_INET;
+	addr.sin_port = htons(net_port);
+
+	/*
+	 * inet_aton() returns non-zero on success (not necessarily 1),
+	 * so test for failure with == 0 and only then fall back to a
+	 * name lookup
+	 */
+	if (inet_aton(hostname, &addr.sin_addr) == 0) {
+		struct hostent *hent = gethostbyname(hostname);
+		if (!hent) {
+			/*
+			 * NOTE(review): gethostbyname reports via h_errno,
+			 * not errno - perror's message may be misleading
+			 */
+			perror("gethostbyname");
+			close(fd);
+			return 1;
+		}
+
+		memcpy(&addr.sin_addr, hent->h_addr, 4);
+		/* NOTE(review): assumes h_name fits in hostname[MAXHOSTNAMELEN] */
+		strcpy(hostname, hent->h_name);
+	}
+
+	printf("blktrace: connecting to %s\n", hostname);
+
+	if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
+		perror("client: connect");
+		close(fd);	/* old code leaked the socket here */
+		return 1;
+	}
+
+	printf("blktrace: connected!\n");
+	net_out_fd = fd;
+	return 0;
+}
+
static char usage_str[] = \
"-d <dev> [ -r relay path ] [ -o <output> ] [-k ] [ -w time ]\n" \
"[ -a action ] [ -A action mask ] [ -v ]\n\n" \
{
fprintf(stderr, "Usage: %s %s %s",program, blktrace_version, usage_str);
}
-static void handle_sigint(__attribute__((__unused__)) int sig)
-{
- done = 1;
- if (!is_trace_stopped()) {
- trace_stopped = 1;
- stop_all_threads();
- stop_all_traces();
- }
-
- show_stats();
-}
int main(int argc, char *argv[])
{
case 'D':
output_dir = optarg;
break;
+ case 'h':
+ net_mode = Net_client;
+ strcpy(hostname, optarg);
+ break;
+ case 'l':
+ net_mode = Net_server;
+ break;
+ case 'p':
+ net_port = atoi(optarg);
+ break;
default:
show_usage(argv[0]);
return 1;
}
}
+ setlocale(LC_NUMERIC, "en_US");
+
+ page_size = getpagesize();
+
+ if (net_mode == Net_server)
+ return net_server();
+
while (optind < argc) {
if (resize_devices(argv[optind++]) != 0)
return 1;
return 0;
}
- setlocale(LC_NUMERIC, "en_US");
-
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 0) {
fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed\n");
return 1;
}
- if (start_devices() != 0)
- return 1;
-
signal(SIGINT, handle_sigint);
signal(SIGHUP, handle_sigint);
signal(SIGTERM, handle_sigint);
signal(SIGALRM, handle_sigint);
+ if (net_mode == Net_client && net_setup_client())
+ return 1;
+
+ if (start_devices() != 0)
+ return 1;
+
atexit(stop_all_tracing);
if (stop_watch)
alarm(stop_watch);
- while (!is_done())
- sleep(1);
+ wait_for_threads();
if (!is_trace_stopped()) {
trace_stopped = 1;