__u64 sector = __r->sector;
r->device = be32_to_cpu(__r->device);
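+ /* also pick up the originating device (device_from) from the pdu */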
+ r->device_from = be32_to_cpu(__r->device_from);
r->sector = be64_to_cpu(sector);
}
struct blk_io_trace *t, unsigned long long elapsed,
int pdu_len, unsigned char *pdu_buf)
{
+ struct blk_io_trace_remap r = { .device = 0, };
char rwbs[6];
char *name;
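+ /*
+ * For remaps we have to modify the device using the remap
+ * structure passed up
+ */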
+ if (act[0] == 'A') { /* remap */
+ get_pdu_remap(t, &r);
+ t->device = r.device_from;
+ }
+
/*
 * The header is always the same
 */
fprintf(ofp, "%3d,%-3d %2d %8d %5d.%09lu %5u %2s %3s ",
MAJOR(t->device), MINOR(t->device), pci->cpu, t->sequence,
(int) SECONDS(t->time), (unsigned long) NANO_SECONDS(t->time),
fprintf(ofp, "[%s] %u\n", name, get_pdu_int(t));
break;
- case 'A': { /* remap */
- struct blk_io_trace_remap r;
-
- get_pdu_remap(t, &r);
+ case 'A': /* remap */
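+ /* r was already filled in above, when the header was emitted */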
fprintf(ofp, "%llu + %u <- (%d,%d) %llu\n",
(unsigned long long) t->sector, t_sec(t),
MAJOR(r.device), MINOR(r.device),
(unsigned long long) r.sector);
break;
- }
case 'X': /* Split */
fprintf(ofp, "%llu / %u [%s]\n", (unsigned long long) t->sector,
struct io *iop;
struct list_head *p;
- __list_for_each(p, head) {
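+ /*
+ * head may be NULL when the device look-up fails, e.g. for a
+ * device first seen via a remap
+ */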
+ if (head != NULL) __list_for_each(p, head) {
iop = list_entry(p, struct io, dev_head);
if (is_bit(iop_in, iop))
return iop;
struct io *iop;
struct list_head *p;
- __list_for_each(p, head) {
+ if (head != NULL) __list_for_each(p, head) {
iop = list_entry(p, struct io, dev_head);
if (in_bit(iop, iop_in))
return iop;
struct io *iop;
struct list_head *p;
- __list_for_each(p, head) {
+ if (head != NULL) __list_for_each(p, head) {
iop = list_entry(p, struct io, dev_head);
if (BIT_START(iop) == sector)
return iop;
struct io *iop;
struct list_head *p;
- __list_for_each(p, head) {
+ if (head != NULL) __list_for_each(p, head) {
iop = list_entry(p, struct io, dev_head);
if (BIT_END(iop) == sector)
return iop;
struct io *iop;
struct list_head *p;
- __list_for_each(p, head) {
+ if (head != NULL) __list_for_each(p, head) {
iop = list_entry(p, struct io, dev_head);
if (BIT_START(iop) <= sector && sector <= BIT_END(iop))
return iop;
struct io *iop;
struct list_head *p;
- __list_for_each(p, head) {
+ if (head != NULL) __list_for_each(p, head) {
iop = list_entry(p, struct io, dev_head);
if (iop->t.cpu == t->cpu && iop->t.sequence == (t->sequence-1))
return iop;
struct list_head *p;
struct io_list *iolp;
- __list_for_each(p, head) {
+ if (head != NULL) __list_for_each(p, head) {
m_iop = list_entry(p, struct io, dev_head);
if (in_bit(m_iop, d_iop)) {
iolp = malloc(sizeof(*iolp));
struct list_head *p;
struct io_list *iolp;
- __list_for_each(p, head) {
+ if (head != NULL) __list_for_each(p, head) {
q_iop = list_entry(p, struct io, dev_head);
if (in_bit(q_iop, i_iop)) {
iolp = malloc(sizeof(*iolp));
}
else
iop->u.q.qp_type = Q_NONE;
-
-#if defined(LVM_REMAP_WORKAROUND)
- if (is_lvm) {
- tmp = dip_find_qa(dip_get_head(iop->dip, IOP_A), &iop->t);
- if (tmp) {
- iop->u.q.qp_type = Q_A;
- io_link(&iop->u.q.qp.q_a, tmp);
- }
- }
-#endif
}
void handle_merge(struct io *iop)
io_link(&iop->u.x.x_q, q_iop);
}
-#if 0
-void __x_add_c(struct io *y_iop, struct io *x_iop,
- struct blk_io_trace_split_end *rp, int which)
-{
- __u32 dev;
- __u64 sector;
- struct d_info *dip;
- struct io **y_cp, *q_iop, *c_iop;
-
- if (which == 1) {
- y_cp = &y_iop->u.y.y_c1;
- dev = be32_to_cpu(rp->dev1);
- sector = be64_to_cpu(rp->sector1);
- }
- else {
- y_cp = &y_iop->u.y.y_c2;
- dev = be32_to_cpu(rp->dev2);
- sector = be64_to_cpu(rp->sector2);
- }
-
- dip = __dip_find(dev);
- ASSERT(dip != NULL);
-
- q_iop = dip_find_end(dip_get_head(dip, IOP_Q), sector);
- if (q_iop) {
- q_iop->u.q.qp_type = Q_X;
- io_link(&q_iop->u.q.qp.q_x, x_iop);
- }
-
- c_iop = dip_find_in_sec(dip_get_head(dip, IOP_C), sector);
- if (c_iop)
- io_link(y_cp, c_iop);
-}
-
-void handle_split_end(struct io *iop)
-{
- struct io *x_iop;
- struct blk_io_trace_split_end *rp = iop->pdu;
-
- pending_xs--;
- io_setup(iop, IOP_Y);
-
- x_iop = dip_find_exact(dip_get_head(iop->dip, IOP_X), iop);
- if (x_iop) {
- __x_add_c(iop, x_iop, rp, 1);
- __x_add_c(iop, x_iop, rp, 2);
-
- rem_c(iop->u.y.y_c1);
- rem_c(iop->u.y.y_c2);
-
- add_cy(iop);
- }
- else
- release_iop(iop);
-}
-#endif
-
void handle_remap(struct io *iop)
{
- struct io *q_iop;
+ struct io *q_iop, *a_iop;
struct blk_io_trace_remap *rp = iop->pdu;
__u32 dev = be32_to_cpu(rp->device);
__u64 sector = be64_to_cpu(rp->sector);
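+
+ /* every remap becomes an A; then find what it should link to */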
+ io_setup(iop, IOP_A);
q_iop = dip_find_in_sec(dip_get_head_dev(dev, IOP_Q), sector);
if (q_iop) {
- io_setup(iop, IOP_A);
+ iop->u.a.ap_type = A_Q;
+ io_link(&iop->u.a.ap.a_q, q_iop);
+ return;
+ }
-#if defined(LVM_REMAP_WORKAROUND)
- if (is_lvm) {
- sector = iop->t.sector;
- iop->t.sector = be64_to_cpu(rp->sector);
- }
-#endif
- io_link(&iop->u.a.a_q, q_iop);
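+ /* no Q found -- the remap may chain off an earlier A (stacked remaps) */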
+ a_iop = dip_find_in_sec(dip_get_head_dev(dev, IOP_A), sector);
+ if (a_iop) {
+ iop->u.a.ap_type = A_A;
+ io_link(&iop->u.a.ap.a_a, a_iop);
+ return;
}
- else
- release_iop(iop);
+
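+ /* nothing found to link against */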
+ iop->u.a.ap_type = A_NONE;
}
void extract_i(struct io *i_iop)
case __BLK_TA_COMPLETE: handle_complete(iop); break;
case __BLK_TA_INSERT: handle_insert(iop); break;
case __BLK_TA_SPLIT: handle_split(iop); break;
-#if 0
- case __BLK_TA_SPLIT_END: handle_split_end(iop); break;
-#endif
case __BLK_TA_REMAP: handle_remap(iop); break;
case __BLK_TA_REQUEUE: handle_requeue(iop); break;
}