The RDS bind and release locking scheme is inefficient. It
uses RCU for the bind hash-table, which is fine on its own,
but it also needs to hold a spinlock for [add/remove]_bound(),
so in the overall use case the concurrent lookup speedup of
the hash-table doesn't pay off. Worse, the blocking nature of
synchronize_rcu() makes RDS socket shutdown too slow, which
hurts RDS performance since connection shutdown and re-connect
happen quite often to maintain the RC part of the protocol.
So make the locking scheme simpler and more efficient by
replacing the spinlock with a reader/writer lock and getting
rid of RCU for the bind hash-table.
A subsequent patch converts the global lock to per-bucket
locks to further reduce lock contention.
Signed-off-by: Santosh Shilimkar <ssantosh@kernel.org>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
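
For clarity, a minimal sketch of the locking pattern this moves to
(illustrative only, not the patched RDS functions; the example_*
names are made up): lookups take the bind lock for read so they can
still run concurrently, while insert/remove take it for write, which
is what lets the shutdown path drop synchronize_rcu().

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(example_bind_lock);
static HLIST_HEAD(example_bucket);

struct example_sock {
    struct hlist_node node;
    u64 key;
};

/* reader side: many lookups may hold the lock at once */
static struct example_sock *example_find(u64 key)
{
    struct example_sock *es, *ret = NULL;
    unsigned long flags;

    read_lock_irqsave(&example_bind_lock, flags);
    hlist_for_each_entry(es, &example_bucket, node) {
        if (es->key == key) {
            ret = es;
            break;
        }
    }
    read_unlock_irqrestore(&example_bind_lock, flags);
    return ret;
}

/*
 * writer side: excludes all readers, so plain hlist ops are safe
 * and nothing needs to wait for an RCU grace period afterwards.
 */
static void example_add(struct example_sock *es)
{
    unsigned long flags;

    write_lock_irqsave(&example_bind_lock, flags);
    hlist_add_head(&es->node, &example_bucket);
    write_unlock_irqrestore(&example_bind_lock, flags);
}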
rds_clear_recv_queue(rs);
rds_cong_remove_socket(rs);
- /*
- * the binding lookup hash uses rcu, we need to
- * make sure we synchronize_rcu before we free our
- * entry
- */
rds_remove_bound(rs);
- synchronize_rcu();
rds_send_drop_to(rs, NULL);
rds_rdma_drop_keys(rs);
#define BIND_HASH_SIZE 1024
static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
-static DEFINE_SPINLOCK(rds_bind_lock);
+static DEFINE_RWLOCK(rds_bind_lock);
static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
{
+/* must hold either read or write lock (write lock for insert != NULL) */
static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
struct rds_sock *insert)
{
u64 cmp;
u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
- rcu_read_lock();
- hlist_for_each_entry_rcu(rs, head, rs_bound_node) {
+ hlist_for_each_entry(rs, head, rs_bound_node) {
cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
be16_to_cpu(rs->rs_bound_port);
- if (cmp == needle) {
- rcu_read_unlock();
+ if (cmp == needle)
return rs;
- }
if (insert) {
/*
* make sure our addr and port are set before
- * we are added to the list, other people
- * in rcu will find us as soon as the
- * hlist_add_head_rcu is done
+ * we are added to the list.
*/
insert->rs_bound_addr = addr;
insert->rs_bound_port = port;
rds_sock_addref(insert);
- hlist_add_head_rcu(&insert->rs_bound_node, head);
+ hlist_add_head(&insert->rs_bound_node, head);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
{
struct rds_sock *rs;
+ unsigned long flags;
+ read_lock_irqsave(&rds_bind_lock, flags);
rs = rds_bind_lookup(addr, port, NULL);
+ read_unlock_irqrestore(&rds_bind_lock, flags);
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
ntohs(port));
- spin_lock_irqsave(&rds_bind_lock, flags);
+ write_lock_irqsave(&rds_bind_lock, flags);
}
} while (rover++ != last);
- spin_unlock_irqrestore(&rds_bind_lock, flags);
+ write_unlock_irqrestore(&rds_bind_lock, flags);
- spin_lock_irqsave(&rds_bind_lock, flags);
+ write_lock_irqsave(&rds_bind_lock, flags);
if (rs->rs_bound_addr) {
rdsdebug("rs %p unbinding from %pI4:%d\n",
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port));
- hlist_del_init_rcu(&rs->rs_bound_node);
+ hlist_del_init(&rs->rs_bound_node);
rds_sock_put(rs);
rs->rs_bound_addr = 0;
}
- spin_unlock_irqrestore(&rds_bind_lock, flags);
+ write_unlock_irqrestore(&rds_bind_lock, flags);
}
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
-
- /* we might have called rds_remove_bound on error */
- if (ret)
- synchronize_rcu();