projects
/
fio.git
/ commitdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
| commitdiff |
tree
raw
|
patch
|
inline
| side by side (parent:
41ee231
)
Correct multiple typos in engines/libhdfs.c
author
Felix Yan
<felixonmars@archlinux.org>
Tue, 3 Mar 2020 07:37:54 +0000
(15:37 +0800)
committer
GitHub
<noreply@github.com>
Tue, 3 Mar 2020 07:37:54 +0000
(15:37 +0800)
engines/libhdfs.c
patch
|
blob
|
blame
|
history
diff --git
a/engines/libhdfs.c
b/engines/libhdfs.c
index 6000160129402472385da53b478229b123db82b2..c57fcea6353821dbdd90db055561f0424f9cbcf0 100644
(file)
--- a/
engines/libhdfs.c
+++ b/
engines/libhdfs.c
@@
-2,7
+2,7
@@
* libhdfs engine
*
* this engine helps perform read/write operations on hdfs cluster using
* libhdfs engine
*
* this engine helps perform read/write operations on hdfs cluster using
- * libhdfs. hdfs doesnot support modification of data once file is created.
+ * libhdfs. hdfs does not support modification of data once file is created.
*
* so to mimic that create many files of small size (e.g 256k), and this
* engine select a file based on the offset generated by fio.
*
* so to mimic that create many files of small size (e.g 256k), and this
* engine select a file based on the offset generated by fio.
@@
-75,7
+75,7
@@
static struct fio_option options[] = {
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct hdfsio_options, directory),
.def = "/",
.type = FIO_OPT_STR_STORE,
.off1 = offsetof(struct hdfsio_options, directory),
.def = "/",
- .help = "The HDFS directory where fio will create chuncks",
+ .help = "The HDFS directory where fio will create chunks",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_HDFS,
},
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_HDFS,
},
@@
-86,7
+86,7
@@
static struct fio_option options[] = {
.type = FIO_OPT_INT,
.off1 = offsetof(struct hdfsio_options, chunck_size),
.def = "1048576",
.type = FIO_OPT_INT,
.off1 = offsetof(struct hdfsio_options, chunck_size),
.def = "1048576",
- .help = "Size of individual chunck",
+ .help = "Size of individual chunk",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_HDFS,
},
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_HDFS,
},
@@
-177,7
+177,7
@@
static enum fio_q_status fio_hdfsio_queue(struct thread_data *td,
if( (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) &&
hdfsTell(hd->fs, hd->fp) != offset && hdfsSeek(hd->fs, hd->fp, offset) != 0 ) {
if( (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) &&
hdfsTell(hd->fs, hd->fp) != offset && hdfsSeek(hd->fs, hd->fp, offset) != 0 ) {
- log_err("hdfs: seek failed: %s, are you doing random write smaller than chunck size ?\n", strerror(errno));
+ log_err("hdfs: seek failed: %s, are you doing random write smaller than chunk size ?\n", strerror(errno));
io_u->error = errno;
return FIO_Q_COMPLETED;
};
io_u->error = errno;
return FIO_Q_COMPLETED;
};
@@
-338,9
+338,9
@@
static int fio_hdfsio_setup(struct thread_data *td)
}
f->real_file_size = file_size;
}
}
f->real_file_size = file_size;
}
- /* If the size doesn't divide nicely with the chunck size,
+ /* If the size doesn't divide nicely with the chunk size,
* make the last files bigger.
* make the last files bigger.
- * Used only if filesize was not explicitely given
+ * Used only if filesize was not explicitly given
*/
if (!td->o.file_size_low && total_file_size < td->o.size) {
f->real_file_size += (td->o.size - total_file_size);
*/
if (!td->o.file_size_low && total_file_size < td->o.size) {
f->real_file_size += (td->o.size - total_file_size);
@@
-374,7
+374,7
@@
static int fio_hdfsio_io_u_init(struct thread_data *td, struct io_u *io_u)
}
hd->fs = hdfsBuilderConnect(bld);
}
hd->fs = hdfsBuilderConnect(bld);
- /* hdfsSetWorkingDirectory succeed on non existend directory */
+ /* hdfsSetWorkingDirectory succeed on non-existent directory */
if (hdfsExists(hd->fs, options->directory) < 0 || hdfsSetWorkingDirectory(hd->fs, options->directory) < 0) {
failure = errno;
log_err("hdfs: invalid working directory %s: %s\n", options->directory, strerror(errno));
if (hdfsExists(hd->fs, options->directory) < 0 || hdfsSetWorkingDirectory(hd->fs, options->directory) < 0) {
failure = errno;
log_err("hdfs: invalid working directory %s: %s\n", options->directory, strerror(errno));