staging: unisys: visornic: add error handling for visorchannel_signalinsert/remove
authorDavid Kershner <david.kershner@unisys.com>
Tue, 28 Mar 2017 13:34:37 +0000 (09:34 -0400)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 29 Mar 2017 07:17:02 +0000 (09:17 +0200)
Since visorchannel_signalinsert/signalremove now return valid error codes,
we need to check their return values at every call site in visornic and
propagate the error codes out to the calling functions.

Signed-off-by: David Kershner <david.kershner@unisys.com>
Reviewed-by: Tim Sell <timothy.sell@unisys.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/unisys/visornic/visornic_main.c

index ac8ed048080df33e9fc2acb4e2a59ee6d685e503..feece918e3481973ae56b19f1c6a548f585fd175 100644 (file)
@@ -407,12 +407,14 @@ alloc_rcv_buf(struct net_device *netdev)
  *     @skb: skb to give to the IO partition
  *
  *     Send the skb to the IO Partition.
- *     Returns void
+ *     Returns 0 or error
  */
-static void
+static int
 post_skb(struct uiscmdrsp *cmdrsp,
         struct visornic_devdata *devdata, struct sk_buff *skb)
 {
+       int err;
+
        cmdrsp->net.buf = skb;
        cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
        cmdrsp->net.rcvpost.frag.pi_off =
@@ -420,18 +422,23 @@ post_skb(struct uiscmdrsp *cmdrsp,
        cmdrsp->net.rcvpost.frag.pi_len = skb->len;
        cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
 
-       if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
-               cmdrsp->net.type = NET_RCV_POST;
-               cmdrsp->cmdtype = CMD_NET_TYPE;
-               if (!visorchannel_signalinsert(devdata->dev->visorchannel,
-                                              IOCHAN_TO_IOPART,
-                                              cmdrsp)) {
-                       atomic_inc(&devdata->num_rcvbuf_in_iovm);
-                       devdata->chstat.sent_post++;
-               } else {
-                       devdata->chstat.sent_post_failed++;
-               }
+       if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) > PI_PAGE_SIZE)
+               return -EINVAL;
+
+       cmdrsp->net.type = NET_RCV_POST;
+       cmdrsp->cmdtype = CMD_NET_TYPE;
+       err = visorchannel_signalinsert(devdata->dev->visorchannel,
+                                       IOCHAN_TO_IOPART,
+                                       cmdrsp);
+       if (err) {
+               devdata->chstat.sent_post_failed++;
+               return err;
        }
+
+       atomic_inc(&devdata->num_rcvbuf_in_iovm);
+       devdata->chstat.sent_post++;
+
+       return 0;
 }
 
 /*
@@ -442,20 +449,25 @@ post_skb(struct uiscmdrsp *cmdrsp,
  *     @devdata: visornic device we are enabling/disabling
  *
  *     Send the enable/disable message to the IO Partition.
- *     Returns void
+ *     Returns 0 or error
  */
-static void
+static int
 send_enbdis(struct net_device *netdev, int state,
            struct visornic_devdata *devdata)
 {
+       int err;
+
        devdata->cmdrsp_rcv->net.enbdis.enable = state;
        devdata->cmdrsp_rcv->net.enbdis.context = netdev;
        devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
        devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
-       if (!visorchannel_signalinsert(devdata->dev->visorchannel,
-                                      IOCHAN_TO_IOPART,
-                                      devdata->cmdrsp_rcv))
-               devdata->chstat.sent_enbdis++;
+       err = visorchannel_signalinsert(devdata->dev->visorchannel,
+                                       IOCHAN_TO_IOPART,
+                                       devdata->cmdrsp_rcv);
+       if (err)
+               return err;
+       devdata->chstat.sent_enbdis++;
+       return 0;
 }
 
 /*
@@ -476,6 +488,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
        int i;
        unsigned long flags;
        int wait = 0;
+       int err;
 
        /* send a msg telling the other end we are stopping incoming pkts */
        spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -485,8 +498,11 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
 
        /* send disable and wait for ack -- don't hold lock when sending
         * disable because if the queue is full, insert might sleep.
+        * If an error occurs, don't wait for the timeout.
         */
-       send_enbdis(netdev, 0, devdata);
+       err = send_enbdis(netdev, 0, devdata);
+       if (err)
+               return err;
 
        /* wait for ack to arrive before we try to free rcv buffers
         * NOTE: the other end automatically unposts the rcv buffers when
@@ -555,7 +571,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
 static int
 init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
 {
-       int i, count;
+       int i, j, count, err;
 
        /* allocate fixed number of receive buffers to post to uisnic
         * post receive buffers after we've allocated a required amount
@@ -585,8 +601,25 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
         * lock - we've not enabled nor started the queue so there shouldn't
         * be any rcv or xmit activity
         */
-       for (i = 0; i < count; i++)
-               post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]);
+       for (i = 0; i < count; i++) {
+               err = post_skb(devdata->cmdrsp_rcv, devdata,
+                              devdata->rcvbuf[i]);
+               if (!err)
+                       continue;
+
+               /* Error handling -
+                * If we posted at least one skb, we should return success,
+                * but need to free the resources that we have not successfully
+                * posted.
+                */
+               for (j = i; j < count; j++) {
+                       kfree_skb(devdata->rcvbuf[j]);
+                       devdata->rcvbuf[j] = NULL;
+               }
+               if (i == 0)
+                       return err;
+               break;
+       }
 
        return 0;
 }
@@ -603,7 +636,7 @@ init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
 static int
 visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
 {
-       int i;
+       int err = 0;
        struct visornic_devdata *devdata = netdev_priv(netdev);
        unsigned long flags;
        int wait = 0;
@@ -613,11 +646,11 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
        /* NOTE: the other end automatically unposts the rcv buffers when it
         * gets a disable.
         */
-       i = init_rcv_bufs(netdev, devdata);
-       if (i < 0) {
+       err = init_rcv_bufs(netdev, devdata);
+       if (err < 0) {
                dev_err(&netdev->dev,
-                       "%s failed to init rcv bufs (%d)\n", __func__, i);
-               return i;
+                       "%s failed to init rcv bufs\n", __func__);
+               return err;
        }
 
        spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -631,9 +664,12 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
        spin_unlock_irqrestore(&devdata->priv_lock, flags);
 
        /* send enable and wait for ack -- don't hold lock when sending enable
-        * because if the queue is full, insert might sleep.
+        * because if the queue is full, insert might sleep. If an error
+        * occurs error out.
         */
-       send_enbdis(netdev, 1, devdata);
+       err = send_enbdis(netdev, 1, devdata);
+       if (err)
+               return err;
 
        spin_lock_irqsave(&devdata->priv_lock, flags);
        while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
@@ -801,6 +837,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
        int len, firstfraglen, padlen;
        struct uiscmdrsp *cmdrsp = NULL;
        unsigned long flags;
+       int err;
 
        devdata = netdev_priv(netdev);
        spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -917,8 +954,9 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_OK;
        }
 
-       if (visorchannel_signalinsert(devdata->dev->visorchannel,
-                                     IOCHAN_TO_IOPART, cmdrsp)) {
+       err = visorchannel_signalinsert(devdata->dev->visorchannel,
+                                       IOCHAN_TO_IOPART, cmdrsp);
+       if (err) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                devdata->busy_cnt++;
@@ -996,6 +1034,7 @@ visornic_set_multi(struct net_device *netdev)
 {
        struct uiscmdrsp *cmdrsp;
        struct visornic_devdata *devdata = netdev_priv(netdev);
+       int err = 0;
 
        if (devdata->old_flags == netdev->flags)
                return;
@@ -1012,10 +1051,12 @@ visornic_set_multi(struct net_device *netdev)
        cmdrsp->net.enbdis.context = netdev;
        cmdrsp->net.enbdis.enable =
                netdev->flags & IFF_PROMISC;
-       visorchannel_signalinsert(devdata->dev->visorchannel,
-                                 IOCHAN_TO_IOPART,
-                                 cmdrsp);
+       err = visorchannel_signalinsert(devdata->dev->visorchannel,
+                                       IOCHAN_TO_IOPART,
+                                       cmdrsp);
        kfree(cmdrsp);
+       if (err)
+               return;
 
 out_save_flags:
        devdata->old_flags = netdev->flags;
@@ -1108,7 +1149,12 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
                                status = -ENOMEM;
                                break;
                        }
-                       post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+                       status = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+                       if (status) {
+                               kfree_skb(devdata->rcvbuf[i]);
+                               devdata->rcvbuf[i] = NULL;
+                               break;
+                       }
                        numreposted++;
                        break;
                }
@@ -1531,17 +1577,18 @@ static const struct file_operations debugfs_info_fops = {
  *     Send receive buffers to the IO Partition.
  *     Returns void
  */
-static void
+static int
 send_rcv_posts_if_needed(struct visornic_devdata *devdata)
 {
        int i;
        struct net_device *netdev;
        struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
        int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
+       int err;
 
        /* don't do this until vnic is marked ready */
        if (!(devdata->enabled && devdata->enab_dis_acked))
-               return;
+               return 0;
 
        netdev = devdata->netdev;
        rcv_bufs_allocated = 0;
@@ -1560,11 +1607,17 @@ send_rcv_posts_if_needed(struct visornic_devdata *devdata)
                                break;
                        }
                        rcv_bufs_allocated++;
-                       post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+                       err = post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
+                       if (err) {
+                               kfree_skb(devdata->rcvbuf[i]);
+                               devdata->rcvbuf[i] = NULL;
+                               break;
+                       }
                        devdata->chstat.extra_rcvbufs_sent++;
                }
        }
        devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
+       return 0;
 }
 
 /*
@@ -1687,8 +1740,12 @@ static int visornic_poll(struct napi_struct *napi, int budget)
                                                        struct visornic_devdata,
                                                        napi);
        int rx_count = 0;
+       int err;
+
+       err = send_rcv_posts_if_needed(devdata);
+       if (err)
+               return err;
 
-       send_rcv_posts_if_needed(devdata);
        service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
 
        /* If there aren't any more packets to receive stop the poll */