Set to 1 only when Direct Write to PMem from the remote host is possible.
Otherwise, set to 0.
+.. option:: busy_wait_polling=bool : [librpma_*_server]
+
+ Set to 0 to block and wait for a completion instead of busy-wait polling
+ for it. Disabling busy-wait polling reduces CPU usage on the server.
+ Default: 1.
+
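+ For illustration, a minimal sketch of how the two modes map onto the
+ librpma completion calls used by this engine; the helper name below is
+ hypothetical and not part of fio::
+
+	#include <librpma.h>
+
+	/* sketch: fetch one completion according to busy_wait_polling */
+	static int get_one_completion(struct rpma_conn *conn,
+			struct rpma_completion *cmpl, int busy_wait_polling)
+	{
+		int ret;
+
+		if (busy_wait_polling)
+			/* poll; RPMA_E_NO_COMPLETION just means "try again" */
+			return rpma_conn_completion_get(conn, cmpl);
+
+		/* block until a completion event arrives, then fetch it */
+		ret = rpma_conn_completion_wait(conn);
+		if (ret)
+			return ret;
+
+		return rpma_conn_completion_get(conn, cmpl);
+	}
+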
.. option:: interface=str : [netsplice] [net]
The IP address of the network interface used to send or receive UDP
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_LIBRPMA,
},
+ {
+ .name = "busy_wait_polling",
+ .lname = "Busy-wait polling",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct librpma_fio_options_values,
+ busy_wait_polling),
+ .help = "Set to false if you want to reduce CPU usage",
+ .def = "1",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_LIBRPMA,
+ },
{
.name = NULL,
},
char *port;
/* Direct Write to PMem is possible */
unsigned int direct_write_to_pmem;
+ /* busy-wait polling for completions (0 - block and wait instead) */
+ unsigned int busy_wait_polling;
};
extern struct fio_option librpma_fio_options[];
struct librpma_fio_server_data *csd = td->io_ops_data;
struct server_data *sd = csd->server_data;
struct rpma_completion *cmpl = &sd->msgs_queued[sd->msg_queued_nr];
+ struct librpma_fio_options_values *o = td->eo;
int ret;
ret = rpma_conn_completion_get(csd->conn, cmpl);
if (ret == RPMA_E_NO_COMPLETION) {
- /* lack of completion is not an error */
- return 0;
+ if (o->busy_wait_polling == 0) {
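+ /* polling disabled: block until a completion event arrives */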
+ ret = rpma_conn_completion_wait(csd->conn);
+ if (ret == RPMA_E_NO_COMPLETION) {
+ /* lack of completion is not an error */
+ return 0;
+ } else if (ret != 0) {
+ librpma_td_verror(td, ret, "rpma_conn_completion_wait");
+ goto err_terminate;
+ }
+
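+ /* the wait returned - a completion should now be ready to fetch */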
+ ret = rpma_conn_completion_get(csd->conn, cmpl);
+ if (ret == RPMA_E_NO_COMPLETION) {
+ /* lack of completion is not an error */
+ return 0;
+ } else if (ret != 0) {
+ librpma_td_verror(td, ret, "rpma_conn_completion_get");
+ goto err_terminate;
+ }
+ } else {
+ /* lack of completion is not an error */
+ return 0;
+ }
} else if (ret != 0) {
librpma_td_verror(td, ret, "rpma_conn_completion_get");
goto err_terminate;
# set to 1 (true) ONLY when Direct Write to PMem from the remote host is possible
# (https://pmem.io/rpma/documentation/basic-direct-write-to-pmem.html)
direct_write_to_pmem=0
+# set to 0 (false) to wait for completion instead of busy-wait polling for it
+busy_wait_polling=1
numjobs=1 # number of expected incoming connections
iodepth=2 # number of parallel GPSPM requests
size=100MiB # size of workspace for a single connection
.BI (librpma_*_server)direct_write_to_pmem \fR=\fPbool
Set to 1 only when Direct Write to PMem from the remote host is possible. Otherwise, set to 0.
.TP
+.BI (librpma_*_server)busy_wait_polling \fR=\fPbool
+Set to 0 to block and wait for a completion instead of busy-wait polling for it.
+Disabling busy-wait polling reduces CPU usage on the server.
+Default: 1.
+.TP
.BI (netsplice,net)interface \fR=\fPstr
The IP address of the network interface used to send or receive UDP
multicast.