ret = fio_sgio_rw_doio(io_u->file, io_u, 0);
- if (ret < 0)
- for (i = 0; i < st->unmap_range_count; i++)
- st->trim_io_us[i]->error = errno;
- else if (hdr->status)
- for (i = 0; i < st->unmap_range_count; i++) {
- st->trim_io_us[i]->resid = hdr->resid;
- st->trim_io_us[i]->error = EIO;
+ if (ret < 0 || hdr->status) {
+ int error;
+
+ if (ret < 0)
+ error = errno;
+ else {
+ error = EIO;
+ ret = -EIO;
}
- else {
- if (fio_fill_issue_time(td)) {
- fio_gettime(&now, NULL);
- for (i = 0; i < st->unmap_range_count; i++) {
- struct io_u *io_u = st->trim_io_us[i];
-
- memcpy(&io_u->issue_time, &now, sizeof(now));
- io_u_queued(td, io_u);
- }
+
+ for (i = 0; i < st->unmap_range_count; i++) {
+ st->trim_io_us[i]->error = error;
+ clear_io_u(td, st->trim_io_us[i]);
+ if (hdr->status)
+ st->trim_io_us[i]->resid = hdr->resid;
}
- io_u_mark_submit(td, st->unmap_range_count);
+
+ td_verror(td, error, "xfer");
+ return ret;
}
- if (io_u->error) {
- td_verror(td, io_u->error, "xfer");
- return 0;
+ if (fio_fill_issue_time(td)) {
+ fio_gettime(&now, NULL);
+		for (i = 0; i < st->unmap_range_count; i++) {
+			memcpy(&st->trim_io_us[i]->issue_time, &now, sizeof(now));
+			io_u_queued(td, st->trim_io_us[i]);
+		}
}
+ io_u_mark_submit(td, st->unmap_range_count);
- if (ret == FIO_Q_QUEUED)
- return 0;
- else
- return ret;
+ return 0;
}
static struct io_u *fio_sgio_event(struct thread_data *td, int event)
* io_u structures, which are not initialized until later.
*/
struct sg_io_hdr hdr;
+ unsigned long long hlba;
+ unsigned int blksz = 0;
unsigned char cmd[16];
unsigned char sb[64];
unsigned char buf[32]; // read capacity return
return ret;
}
- *bs = ((unsigned long) buf[4] << 24) | ((unsigned long) buf[5] << 16) |
- ((unsigned long) buf[6] << 8) | (unsigned long) buf[7];
- *max_lba = ((unsigned long) buf[0] << 24) | ((unsigned long) buf[1] << 16) |
- ((unsigned long) buf[2] << 8) | (unsigned long) buf[3];
+ if (hdr.info & SG_INFO_CHECK) {
+ /* RCAP(10) might be unsupported by device. Force RCAP(16) */
+ hlba = MAX_10B_LBA;
+ } else {
+ blksz = ((unsigned long) buf[4] << 24) | ((unsigned long) buf[5] << 16) |
+ ((unsigned long) buf[6] << 8) | (unsigned long) buf[7];
+ hlba = ((unsigned long) buf[0] << 24) | ((unsigned long) buf[1] << 16) |
+ ((unsigned long) buf[2] << 8) | (unsigned long) buf[3];
+ }
/*
* If max lba masked by MAX_10B_LBA equals MAX_10B_LBA,
* then need to retry with 16 byte Read Capacity command.
*/
- if (*max_lba == MAX_10B_LBA) {
+ if (hlba == MAX_10B_LBA) {
hdr.cmd_len = 16;
hdr.cmdp[0] = 0x9e; // service action
hdr.cmdp[1] = 0x10; // Read Capacity(16)
if (hdr.info & SG_INFO_CHECK)
td_verror(td, EIO, "fio_sgio_read_capacity");
- *bs = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11];
- *max_lba = ((unsigned long long)buf[0] << 56) |
- ((unsigned long long)buf[1] << 48) |
- ((unsigned long long)buf[2] << 40) |
- ((unsigned long long)buf[3] << 32) |
- ((unsigned long long)buf[4] << 24) |
- ((unsigned long long)buf[5] << 16) |
- ((unsigned long long)buf[6] << 8) |
- (unsigned long long)buf[7];
+		blksz = ((unsigned long) buf[8] << 24) | ((unsigned long) buf[9] << 16) |
+			((unsigned long) buf[10] << 8) | (unsigned long) buf[11];
+ hlba = ((unsigned long long)buf[0] << 56) |
+ ((unsigned long long)buf[1] << 48) |
+ ((unsigned long long)buf[2] << 40) |
+ ((unsigned long long)buf[3] << 32) |
+ ((unsigned long long)buf[4] << 24) |
+ ((unsigned long long)buf[5] << 16) |
+ ((unsigned long long)buf[6] << 8) |
+ (unsigned long long)buf[7];
+ }
+
+ if (blksz) {
+ *bs = blksz;
+ *max_lba = hlba;
+ ret = 0;
+ } else {
+ ret = EIO;
}
close(fd);
- return 0;
+ return ret;
}
static void fio_sgio_cleanup(struct thread_data *td)