#ifndef __LIBCFS_TIME_H__
#define __LIBCFS_TIME_H__
-/*
- * generic time manipulation functions.
- */
-
-static inline int cfs_time_after(unsigned long t1, unsigned long t2)
-{
- return time_before(t2, t1);
-}
-
-static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
-{
- return time_before_eq(t2, t1);
-}
-
/*
* return valid time-out based on user supplied one. Currently we only check
 * that time-out is not shorter than allowed.
return 0;
if (fpo->fpo_failed)
return 1;
- return cfs_time_aftereq(now, fpo->fpo_deadline);
+ return time_after_eq(now, fpo->fpo_deadline);
}
static int
return 0;
if (pool->po_failed)
return 1;
- return cfs_time_aftereq(now, pool->po_deadline);
+ return time_after_eq(now, pool->po_deadline);
}
void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
kiblnd_send_keepalive(struct kib_conn *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
- cfs_time_after(jiffies, conn->ibc_last_send +
- msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
- MSEC_PER_SEC));
+ time_after(jiffies, conn->ibc_last_send +
+ msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
+ MSEC_PER_SEC));
}
static inline int
LASSERT(tx->tx_waiting || tx->tx_sending);
}
- if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
+ if (time_after_eq(jiffies, tx->tx_deadline)) {
CERROR("Timed out tx: %s, %lu seconds\n",
kiblnd_queue2str(conn, txs),
cfs_duration_sec(jiffies - tx->tx_deadline));
LASSERT(conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
&ksocknal_data.ksnd_enomem_conns);
- if (!cfs_time_aftereq(jiffies + SOCKNAL_ENOMEM_RETRY,
+ if (!time_after_eq(jiffies + SOCKNAL_ENOMEM_RETRY,
ksocknal_data.ksnd_reaper_waketime))
wake_up(&ksocknal_data.ksnd_reaper_waitq);
case SOCKNAL_MATCH_YES: /* typed connection */
if (!typed || tnob > nob ||
(tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+ time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
typed = c;
tnob = nob;
}
case SOCKNAL_MATCH_MAY: /* fallback connection */
if (!fallback || fnob > nob ||
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
- cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
+ time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
fallback = c;
fnob = nob;
}
continue;
if (!(!route->ksnr_retry_interval || /* first attempt */
- cfs_time_aftereq(now, route->ksnr_timeout))) {
+ time_after_eq(now, route->ksnr_timeout))) {
CDEBUG(D_NET,
"Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
&route->ksnr_ipaddr,
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
- if (cfs_time_aftereq(jiffies, deadline)) {
+ if (time_after_eq(jiffies, deadline)) {
rc = -ETIMEDOUT;
lnet_connect_console_error(rc, peer->ksnp_id.nid,
route->ksnr_ipaddr,
list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
ksnr_connd_list) {
if (!route->ksnr_retry_interval ||
- cfs_time_aftereq(now, route->ksnr_timeout))
+ time_after_eq(now, route->ksnr_timeout))
return route;
if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
}
if (conn->ksnc_rx_started &&
- cfs_time_aftereq(jiffies,
- conn->ksnc_rx_deadline)) {
+ time_after_eq(jiffies,
+ conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %zd left %d\n",
if ((!list_empty(&conn->ksnc_tx_queue) ||
conn->ksnc_sock->sk->sk_wmem_queued) &&
- cfs_time_aftereq(jiffies,
- conn->ksnc_tx_deadline)) {
+ time_after_eq(jiffies,
+ conn->ksnc_tx_deadline)) {
/*
* Timed out messages queued for sending or
* buffered in the socket's send buffer
write_lock_bh(&ksocknal_data.ksnd_global_lock);
list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
- if (!cfs_time_aftereq(jiffies,
- tx->tx_deadline))
+ if (!time_after_eq(jiffies,
+ tx->tx_deadline))
break;
list_del(&tx->tx_list);
tx = list_entry(peer->ksnp_tx_queue.next,
struct ksock_tx, tx_list);
- if (cfs_time_aftereq(jiffies,
- tx->tx_deadline)) {
+ if (time_after_eq(jiffies,
+ tx->tx_deadline)) {
ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
tx_stale = NULL;
spin_lock(&peer->ksnp_lock);
list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
- if (!cfs_time_aftereq(jiffies,
- tx->tx_deadline))
+ if (!time_after_eq(jiffies,
+ tx->tx_deadline))
break;
/* ignore the TX if connection is being closed */
if (tx->tx_conn->ksnc_closing)
if (cdls) {
if (libcfs_console_ratelimit &&
cdls->cdls_next && /* not first time ever */
- !cfs_time_after(jiffies, cdls->cdls_next)) {
+ !time_after(jiffies, cdls->cdls_next)) {
/* skipping a console message */
cdls->cdls_count++;
if (tcd)
return 1;
}
- if (cfs_time_after(jiffies,
- cdls->cdls_next + libcfs_console_max_delay +
- 10 * HZ)) {
+ if (time_after(jiffies,
+ cdls->cdls_next + libcfs_console_max_delay +
+ 10 * HZ)) {
/* last timeout was a long time ago */
cdls->cdls_delay /= libcfs_console_backoff * 4;
} else {
* ignore the initial assumed death (see lnet_peers_start_down()).
*/
if (!lp->lp_alive && lp->lp_alive_count > 0 &&
- cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
+ time_after_eq(lp->lp_timestamp, lp->lp_last_alive))
return 0;
deadline = lp->lp_last_alive + lp->lp_ni->ni_peertimeout * HZ;
- alive = cfs_time_after(deadline, now);
+ alive = time_after(deadline, now);
/* Update obsolete lp_alive except for routers assumed to be dead
* initially, because router checker would update aliveness in this
unsigned long now = jiffies;
rule->dr_stat.fs_count++;
- drop = cfs_time_aftereq(now, rule->dr_drop_time);
+ drop = time_after_eq(now, rule->dr_drop_time);
if (drop) {
- if (cfs_time_after(now, rule->dr_time_base))
+ if (time_after(now, rule->dr_time_base))
rule->dr_time_base = now;
rule->dr_drop_time = rule->dr_time_base +
unsigned long now = jiffies;
rule->dl_stat.fs_count++;
- delay = cfs_time_aftereq(now, rule->dl_delay_time);
+ delay = time_after_eq(now, rule->dl_delay_time);
if (delay) {
- if (cfs_time_after(now, rule->dl_time_base))
+ if (time_after(now, rule->dl_time_base))
rule->dl_time_base = now;
rule->dl_delay_time = rule->dl_time_base +
lnet_peer_addref_locked(rtr);
if (rtr->lp_ping_deadline && /* ping timed out? */
- cfs_time_after(now, rtr->lp_ping_deadline))
+ time_after(now, rtr->lp_ping_deadline))
lnet_notify_locked(rtr, 1, 0, now);
/* Run any outstanding notifications */
rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
if (secs && !rtr->lp_ping_notsent &&
- cfs_time_after(now, rtr->lp_ping_timestamp + secs * HZ)) {
+ time_after(now, rtr->lp_ping_timestamp + secs * HZ)) {
int rc;
struct lnet_process_id id;
struct lnet_handle_md mdh;
}
/* can't do predictions... */
- if (cfs_time_after(when, now)) {
+ if (time_after(when, now)) {
CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
!ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
libcfs_nid2str(nid), alive ? "up" : "down",
continue;
nd = crpc->crp_node;
- if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
+ if (time_after(nd->nd_stamp, crpc->crp_stamp))
continue;
nd->nd_stamp = crpc->crp_stamp;
crpc->crp_unpacked = 1;
}
- if (cfs_time_after(nd->nd_stamp, crpc->crp_stamp))
+ if (time_after(nd->nd_stamp, crpc->crp_stamp))
return 0;
nd->nd_stamp = crpc->crp_stamp;
spin_lock(&stt_data.stt_lock);
- while (cfs_time_aftereq(this_slot, *last)) {
+ while (time_after_eq(this_slot, *last)) {
expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
this_slot = this_slot - STTIMER_SLOTTIME;
}
lock_res_and_lock(lock);
if (lock->l_granted_mode == LCK_PW &&
!lock->l_readers && !lock->l_writers &&
- cfs_time_after(jiffies,
- lock->l_last_used + 10 * HZ)) {
+ time_after(jiffies,
+ lock->l_last_used + 10 * HZ)) {
unlock_res_and_lock(lock);
if (ldlm_bl_to_thread_lock(ns, NULL, lock))
ldlm_handle_bl_callback(ns, NULL, lock);
(s64)lock->l_last_activity,
(s64)(ktime_get_real_seconds() -
lock->l_last_activity));
- if (cfs_time_after(jiffies, next_dump)) {
+ if (time_after(jiffies, next_dump)) {
last_dump = next_dump;
next_dump = jiffies + 300 * HZ;
ldlm_namespace_dump(D_DLMTRACE,
* Despite of the LV, It doesn't make sense to keep the lock which
* is unused for ns_max_age time.
*/
- if (cfs_time_after(jiffies, lock->l_last_used + ns->ns_max_age))
+ if (time_after(jiffies, lock->l_last_used + ns->ns_max_age))
return LDLM_POLICY_CANCEL_LOCK;
slv = ldlm_pool_get_slv(pl);
* ll_file_is_contended.
*/
retry_time = obj->oo_contention_time + osc_contention_time * HZ;
- if (cfs_time_after(cur_time, retry_time)) {
+ if (time_after(cur_time, retry_time)) {
osc_object_clear_contended(obj);
return 0;
}
OBD_CONNECT_GRANT_SHRINK) == 0)
return 0;
- if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
+ if (time_after_eq(time, next_shrink - 5 * CFS_TICK)) {
/* Get the current RPC size directly, instead of going via:
* cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
* Keep comment here so that it can be found by searching.
imp->imp_force_verify = 0;
- if (cfs_time_aftereq(imp->imp_next_ping - 5 * CFS_TICK, this_ping) &&
+ if (time_after_eq(imp->imp_next_ping - 5 * CFS_TICK, this_ping) &&
!force) {
spin_unlock(&imp->imp_lock);
return;
ptlrpc_pinger_process_import(imp, this_ping);
/* obd_timeout might have changed */
if (imp->imp_pingable && imp->imp_next_ping &&
- cfs_time_after(imp->imp_next_ping,
- this_ping + PING_INTERVAL * HZ))
+ time_after(imp->imp_next_ping,
+ this_ping + PING_INTERVAL * HZ))
ptlrpc_update_next_ping(imp, 0);
}
mutex_unlock(&pinger_mutex);