arch: Mass conversion of smp_mb__*()
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 63ae657f255bf728c5b0780d0bace7feacde7a33..402a7e9a16b7cdf05aa5d23462b3113b1b96c4d3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -254,7 +254,7 @@ struct sock_xprt {
        /*
         * Saved socket callback addresses
         */
-       void                    (*old_data_ready)(struct sock *, int);
+       void                    (*old_data_ready)(struct sock *);
        void                    (*old_state_change)(struct sock *);
        void                    (*old_write_space)(struct sock *);
        void                    (*old_error_report)(struct sock *);
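The struct change above tracks the tree-wide change to sk_data_ready: the
callback now receives only the socket, so the slot that saves the old pointer
must change type with it. A minimal sketch of the save/restore pattern this
slot serves, assuming the usual sk_callback_lock discipline; my_data_ready()
and my_save_callbacks() are illustrative names, not kernel API:

static void my_data_ready(struct sock *sk);     /* new one-argument form */

static void my_save_callbacks(struct sock_xprt *transport, struct sock *sk)
{
        write_lock_bh(&sk->sk_callback_lock);
        transport->old_data_ready = sk->sk_data_ready; /* types now agree */
        sk->sk_data_ready = my_data_ready;
        write_unlock_bh(&sk->sk_callback_lock);
}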
@@ -510,6 +510,7 @@ static int xs_nospace(struct rpc_task *task)
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+       struct sock *sk = transport->inet;
        int ret = -EAGAIN;
 
        dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
@@ -527,7 +528,7 @@ static int xs_nospace(struct rpc_task *task)
                         * window size
                         */
                        set_bit(SOCK_NOSPACE, &transport->sock->flags);
-                       transport->inet->sk_write_pending++;
+                       sk->sk_write_pending++;
                        /* ...and wait for more buffer space */
                        xprt_wait_for_buffer_space(task, xs_nospace_callback);
                }
@@ -537,6 +538,9 @@ static int xs_nospace(struct rpc_task *task)
        }
 
        spin_unlock_bh(&xprt->transport_lock);
+
+       /* Race breaker in case memory is freed before above code is called */
+       sk->sk_write_space(sk);
        return ret;
 }
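The three xs_nospace() hunks above are a race fix: transport->inet is cached
in a local sk, and once transport_lock is dropped the write-space callback is
replayed unconditionally. If buffer space was freed between the no-space test
and the task being queued, the real wakeup has already fired and the task
could sleep forever. Reduced to its shape (my_xs_nospace() is an illustrative
name; the other identifiers come from the hunks above):

static int my_xs_nospace(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
        struct sock *sk = transport->inet;
        int ret = -EAGAIN;

        spin_lock_bh(&xprt->transport_lock);
        if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
                set_bit(SOCK_NOSPACE, &transport->sock->flags);
                sk->sk_write_pending++;         /* advertise a waiter */
                xprt_wait_for_buffer_space(task, xs_nospace_callback);
        }
        spin_unlock_bh(&xprt->transport_lock);

        /* Race breaker: if sk_write_space() fired between the test and
         * the task being queued, that wakeup is lost; replay it here. */
        sk->sk_write_space(sk);
        return ret;
}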
 
@@ -889,11 +893,11 @@ static void xs_close(struct rpc_xprt *xprt)
        xs_reset_transport(transport);
        xprt->reestablish_timeout = 0;
 
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
        xprt_disconnect_done(xprt);
 }
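This hunk and the matching ones further down are the mechanical rename named
in the commit title: the bit-op-specific barriers become the generic atomic
ones. Behaviour is unchanged; the new names simply document that these
barriers order any non-value-returning atomic (set_bit(), atomic_inc(), ...),
not just clear_bit():

        smp_mb__before_atomic();        /* was smp_mb__before_clear_bit() */
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        smp_mb__after_atomic();         /* was smp_mb__after_clear_bit() */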
 
@@ -947,7 +951,7 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
  *
  * Currently this assumes we can read the whole reply in a single gulp.
  */
-static void xs_local_data_ready(struct sock *sk, int len)
+static void xs_local_data_ready(struct sock *sk)
 {
        struct rpc_task *task;
        struct rpc_xprt *xprt;
@@ -1010,7 +1014,7 @@ static void xs_local_data_ready(struct sock *sk, int len)
  * @len: how much data to read
  *
  */
-static void xs_udp_data_ready(struct sock *sk, int len)
+static void xs_udp_data_ready(struct sock *sk)
 {
        struct rpc_task *task;
        struct rpc_xprt *xprt;
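xs_local_data_ready() and xs_udp_data_ready() lose their length argument in
the same tree-wide conversion; a data-ready handler now discovers how much
arrived from the socket itself. A skeleton of the locking shape these
callbacks share, assuming the file's existing xprt_from_sock() (sk_user_data)
helper; the body comment stands in for the transport-specific read:

static void my_data_ready(struct sock *sk)      /* illustrative name */
{
        struct rpc_xprt *xprt;

        read_lock_bh(&sk->sk_callback_lock);
        xprt = xprt_from_sock(sk);
        if (xprt != NULL) {
                /* Pull queued skbs off sk and feed them to the transport;
                 * how much is available is discovered from the socket,
                 * not from a 'len' parameter. */
        }
        read_unlock_bh(&sk->sk_callback_lock);
}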
@@ -1307,41 +1311,29 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
  * If we're unable to obtain the rpc_rqst we schedule the closing of the
  * connection and return -1.
  */
-static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
+static int xs_tcp_read_callback(struct rpc_xprt *xprt,
                                       struct xdr_skb_reader *desc)
 {
        struct sock_xprt *transport =
                                container_of(xprt, struct sock_xprt, xprt);
        struct rpc_rqst *req;
 
-       req = xprt_alloc_bc_request(xprt);
+       /* Look up and lock the request corresponding to the given XID */
+       spin_lock(&xprt->transport_lock);
+       req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
        if (req == NULL) {
+               spin_unlock(&xprt->transport_lock);
                printk(KERN_WARNING "Callback slot table overflowed\n");
                xprt_force_disconnect(xprt);
                return -1;
        }
 
-       req->rq_xid = transport->tcp_xid;
        dprintk("RPC:       read callback  XID %08x\n", ntohl(req->rq_xid));
        xs_tcp_read_common(xprt, desc, req);
 
-       if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
-               struct svc_serv *bc_serv = xprt->bc_serv;
-
-               /*
-                * Add callback request to callback list.  The callback
-                * service sleeps on the sv_cb_waitq waiting for new
-                * requests.  Wake it up after adding enqueing the
-                * request.
-                */
-               dprintk("RPC:       add callback request to list\n");
-               spin_lock(&bc_serv->sv_cb_lock);
-               list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
-               spin_unlock(&bc_serv->sv_cb_lock);
-               wake_up(&bc_serv->sv_cb_waitq);
-       }
-
-       req->rq_private_buf.len = transport->tcp_copied;
+       if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
+               xprt_complete_bc_request(req, transport->tcp_copied);
+       spin_unlock(&xprt->transport_lock);
 
        return 0;
 }
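The rewrite above replaces "allocate a fresh backchannel slot and fill in the
XID" with "look up the preallocated slot for this XID under transport_lock and
complete it", so a callback whose data arrives split across several TCP
segments keeps resolving to the same rpc_rqst. The enqueue-and-wake logic
deleted above plausibly moves into xprt_complete_bc_request(); a hedged
reconstruction from the removed lines (my_complete_bc_request() is
illustrative — see net/sunrpc/backchannel_rqst.c for the real helper):

void my_complete_bc_request(struct rpc_rqst *req, u32 copied)
{
        struct svc_serv *bc_serv = req->rq_xprt->bc_serv;

        req->rq_private_buf.len = copied;

        /* The callback service sleeps on sv_cb_waitq waiting for new
         * requests; wake it after enqueuing this one. */
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);
        wake_up(&bc_serv->sv_cb_waitq);
}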
@@ -1445,7 +1437,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
  * @bytes: how much data to read
  *
  */
-static void xs_tcp_data_ready(struct sock *sk, int bytes)
+static void xs_tcp_data_ready(struct sock *sk)
 {
        struct rpc_xprt *xprt;
        read_descriptor_t rd_desc;
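Only the prototype changes here: tcp_read_sock() walks the receive queue and
reports what the actor consumed, so the old bytes argument carried no
information the loop needed. Roughly (a sketch of the drain loop, not the
verbatim body, and without the callback-lock handling):

static void my_tcp_data_ready(struct sock *sk)  /* illustrative name */
{
        struct rpc_xprt *xprt = xprt_from_sock(sk);
        read_descriptor_t rd_desc;
        int read;

        rd_desc.arg.data = xprt;
        do {
                rd_desc.count = 65536;  /* per-pass budget */
                read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
        } while (read > 0);
}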
@@ -1505,12 +1497,12 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
 
 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
 {
-       smp_mb__before_clear_bit();
+       smp_mb__before_atomic();
        clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
        clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 }
 
 static void xs_sock_mark_closed(struct rpc_xprt *xprt)
@@ -1564,10 +1556,10 @@ static void xs_tcp_state_change(struct sock *sk)
                xprt->connect_cookie++;
                xprt->reestablish_timeout = 0;
                set_bit(XPRT_CLOSING, &xprt->state);
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(XPRT_CONNECTED, &xprt->state);
                clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
                break;
        case TCP_CLOSE_WAIT:
@@ -1586,9 +1578,9 @@ static void xs_tcp_state_change(struct sock *sk)
        case TCP_LAST_ACK:
                set_bit(XPRT_CLOSING, &xprt->state);
                xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
-               smp_mb__before_clear_bit();
+               smp_mb__before_atomic();
                clear_bit(XPRT_CONNECTED, &xprt->state);
-               smp_mb__after_clear_bit();
+               smp_mb__after_atomic();
                break;
        case TCP_CLOSE:
                xs_tcp_cancel_linger_timeout(xprt);
@@ -2923,15 +2915,6 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
        struct svc_sock *bc_sock;
        struct rpc_xprt *ret;
 
-       if (args->bc_xprt->xpt_bc_xprt) {
-               /*
-                * This server connection already has a backchannel
-                * transport; we can't create a new one, as we wouldn't
-                * be able to match replies based on xid any more.  So,
-                * reuse the already-existing one:
-                */
-                return args->bc_xprt->xpt_bc_xprt;
-       }
        xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
                        xprt_tcp_slot_table_entries);
        if (IS_ERR(xprt))
@@ -2991,6 +2974,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 
        if (try_module_get(THIS_MODULE))
                return xprt;
+
+       args->bc_xprt->xpt_bc_xprt = NULL;
        xprt_put(xprt);
        ret = ERR_PTR(-EINVAL);
 out_err:
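Both xs_setup_bc_tcp() hunks touch the back-pointer published in
args->bc_xprt->xpt_bc_xprt: the reuse short-cut at the top of the function is
removed, and the failure path now clears the pointer before dropping what may
be the final reference, so the server socket is not left pointing at a freed
transport. The shape of the fixed error path, reduced:

        if (try_module_get(THIS_MODULE))
                return xprt;                    /* success: back-pointer stays set */

        args->bc_xprt->xpt_bc_xprt = NULL;      /* unpublish before the final put */
        xprt_put(xprt);                         /* may free xprt */
        return ERR_PTR(-EINVAL);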