Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next...
authorDavid S. Miller <davem@davemloft.net>
Tue, 2 Oct 2018 18:39:09 +0000 (11:39 -0700)
committerDavid S. Miller <davem@davemloft.net>
Tue, 2 Oct 2018 18:39:09 +0000 (11:39 -0700)
Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-10-02

This series contains updates to ice driver only.

Anirudh expands the use of VSI handles across the rest of the driver,
which includes refactoring the code to correctly use VSI handles.  After
a reset, ensure that all configurations for a VSI get re-applied before
moving on to rebuilding the next VSI.

Dave fixed the driver to check the current link state after reset to
ensure that the correct link state of a port is reported.  Fixed an
issue where if the driver is unloaded when traffic is in progress,
errors are generated.

Preethi breaks up the IRQ tracker into a software and hardware IRQ
tracker, where the software IRQ tracker tracks only the PF's IRQ
requests and does not play any role in the VF initialization.  The
hardware IRQ tracker represents the device's interrupt space and will be
looked up to see if the device has run out of interrupts when an
interrupt has to be allocated in the device for either PF or VF.

Md Fahad adds support for enabling/disabling RSS via ethtool.

Brett aligns the ice_reset_req enum values to the values that the
hardware understands.  Also added initial support for dynamic interrupt
moderation in the ice driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/isdn/hisax/w6692.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
include/linux/qed/qed_if.h
include/linux/skbuff.h
net/core/skbuff.c

index bb8e4b7e34ea8d7011d57812c90777d680066bdc..36eefaa3a7d9ac0a726b40bcc4dd32e5dfdb379b 100644 (file)
@@ -72,7 +72,7 @@ W6692_new_ph(struct IsdnCardState *cs)
        case (W_L1CMD_RST):
                ph_command(cs, W_L1CMD_DRC);
                l1_msg(cs, HW_RESET | INDICATION, NULL);
-               /* fallthru */
+               /* fall through */
        case (W_L1IND_CD):
                l1_msg(cs, HW_DEACTIVATE | CONFIRM, NULL);
                break;
index a8369addfe688412212d8eb7716f6cbbcbb41d28..ad898e8eaca1609046fbb9c020c7ad2fb547864f 100644 (file)
@@ -2364,8 +2364,13 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
-       ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+               ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+               ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
+       } else {
+               ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+               ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
+       }
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -2378,21 +2383,23 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       int ret;
 
-       if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
-           ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
-               netdev_err(netdev, "Invalid request.\n");
-               netdev_err(netdev, "Max tx buffers = %llu\n",
-                          adapter->max_rx_add_entries_per_subcrq);
-               netdev_err(netdev, "Max rx buffers = %llu\n",
-                          adapter->max_tx_entries_per_subcrq);
-               return -EINVAL;
-       }
-
+       ret = 0;
        adapter->desired.rx_entries = ring->rx_pending;
        adapter->desired.tx_entries = ring->tx_pending;
 
-       return wait_for_reset(adapter);
+       ret = wait_for_reset(adapter);
+
+       if (!ret &&
+           (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
+            adapter->req_tx_entries_per_subcrq != ring->tx_pending))
+               netdev_info(netdev,
+                           "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+                           ring->rx_pending, ring->tx_pending,
+                           adapter->req_rx_add_entries_per_subcrq,
+                           adapter->req_tx_entries_per_subcrq);
+       return ret;
 }
 
 static void ibmvnic_get_channels(struct net_device *netdev,
@@ -2400,8 +2407,14 @@ static void ibmvnic_get_channels(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       channels->max_rx = adapter->max_rx_queues;
-       channels->max_tx = adapter->max_tx_queues;
+       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
+               channels->max_rx = adapter->max_rx_queues;
+               channels->max_tx = adapter->max_tx_queues;
+       } else {
+               channels->max_rx = IBMVNIC_MAX_QUEUES;
+               channels->max_tx = IBMVNIC_MAX_QUEUES;
+       }
+
        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->req_rx_queues;
@@ -2414,11 +2427,23 @@ static int ibmvnic_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channels)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       int ret;
 
+       ret = 0;
        adapter->desired.rx_queues = channels->rx_count;
        adapter->desired.tx_queues = channels->tx_count;
 
-       return wait_for_reset(adapter);
+       ret = wait_for_reset(adapter);
+
+       if (!ret &&
+           (adapter->req_rx_queues != channels->rx_count ||
+            adapter->req_tx_queues != channels->tx_count))
+               netdev_info(netdev,
+                           "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
+                           channels->rx_count, channels->tx_count,
+                           adapter->req_rx_queues, adapter->req_tx_queues);
+       return ret;
+
 }
 
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2426,32 +2451,43 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int i;
 
-       if (stringset != ETH_SS_STATS)
-               return;
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
+                               i++, data += ETH_GSTRING_LEN)
+                       memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
 
-       for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
-               memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+               for (i = 0; i < adapter->req_tx_queues; i++) {
+                       snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+                       data += ETH_GSTRING_LEN;
 
-       for (i = 0; i < adapter->req_tx_queues; i++) {
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+                       data += ETH_GSTRING_LEN;
 
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN,
+                                "tx%d_dropped_packets", i);
+                       data += ETH_GSTRING_LEN;
+               }
 
-               snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
-               data += ETH_GSTRING_LEN;
-       }
+               for (i = 0; i < adapter->req_rx_queues; i++) {
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+                       data += ETH_GSTRING_LEN;
 
-       for (i = 0; i < adapter->req_rx_queues; i++) {
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+                       data += ETH_GSTRING_LEN;
 
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
-               data += ETH_GSTRING_LEN;
+                       snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
 
-               snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
-               data += ETH_GSTRING_LEN;
+       case ETH_SS_PRIV_FLAGS:
+               for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
+                       strcpy(data + i * ETH_GSTRING_LEN,
+                              ibmvnic_priv_flags[i]);
+               break;
+       default:
+               return;
        }
 }
 
@@ -2464,6 +2500,8 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
                return ARRAY_SIZE(ibmvnic_stats) +
                       adapter->req_tx_queues * NUM_TX_STATS +
                       adapter->req_rx_queues * NUM_RX_STATS;
+       case ETH_SS_PRIV_FLAGS:
+               return ARRAY_SIZE(ibmvnic_priv_flags);
        default:
                return -EOPNOTSUPP;
        }
@@ -2514,6 +2552,25 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
        }
 }
 
+static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+
+       return adapter->priv_flags;
+}
+
+static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
+
+       if (which_maxes)
+               adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
+       else
+               adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
+
+       return 0;
+}
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_drvinfo            = ibmvnic_get_drvinfo,
        .get_msglevel           = ibmvnic_get_msglevel,
@@ -2527,6 +2584,8 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_sset_count         = ibmvnic_get_sset_count,
        .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
        .get_link_ksettings     = ibmvnic_get_link_ksettings,
+       .get_priv_flags         = ibmvnic_get_priv_flags,
+       .set_priv_flags         = ibmvnic_set_priv_flags,
 };
 
 /* Routines for managing CRQs/sCRQs  */
index f06eec145ca60689bef26f119ed18f372867924a..18103b811d4db398df7ce6a6e27c6bda2077c4c2 100644 (file)
@@ -39,7 +39,8 @@
 #define IBMVNIC_RX_WEIGHT              16
 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
 #define IBMVNIC_BUFFS_PER_POOL 100
-#define IBMVNIC_MAX_QUEUES     10
+#define IBMVNIC_MAX_QUEUES     16
+#define IBMVNIC_MAX_QUEUE_SZ   4096
 
 #define IBMVNIC_TSO_BUF_SZ     65536
 #define IBMVNIC_TSO_BUFS       64
 #define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
 #define IBMVNIC_BUFFER_HLEN 500
 
+static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
+#define IBMVNIC_USE_SERVER_MAXES 0x1
+       "use-server-maxes"
+};
+
 struct ibmvnic_login_buffer {
        __be32 len;
        __be32 version;
@@ -969,6 +975,7 @@ struct ibmvnic_adapter {
        struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
        dma_addr_t ip_offload_ctrl_tok;
        u32 msg_enable;
+       u32 priv_flags;
 
        /* Vital Product Data (VPD) */
        struct ibmvnic_vpd *vpd;
index 0fbeafeef7a04afd6534e634359c279432d47eee..7ceb2b97538d25d767c3d8cc7e7ab79d8b03e760 100644 (file)
@@ -2679,6 +2679,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
                link->speed.forced_speed = 10000;
                break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
+               link->speed.forced_speed = 20000;
+               break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
                link->speed.forced_speed = 25000;
                break;
index d4d08383c75334c8991993871012c4e66debbe47..56578f888b705bf4e9c03c6fdb9bf73cf5ec921c 100644 (file)
@@ -13154,6 +13154,7 @@ struct nvm_cfg1_port {
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET         0
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G             0x1
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G            0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G             0x4
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G            0x8
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G            0x10
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G            0x20
@@ -13164,6 +13165,7 @@ struct nvm_cfg1_port {
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                   0x0
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                                0x1
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                       0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G                        0x3
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                       0x4
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                       0x5
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                       0x6
index 2094d86a7a087dac2eed0fe77bd71f66e7d975c7..75d217aaf8cec142dbe572eb6f5abc101acabf31 100644 (file)
@@ -1337,6 +1337,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
                if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
+                       link_params->speed.advertised_speeds |=
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
                if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
@@ -1502,6 +1505,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
+       if (params.speed.advertised_speeds &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+               if_link->advertised_caps |= QED_LM_20000baseKR2_Full_BIT;
        if (params.speed.advertised_speeds &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
@@ -1522,6 +1528,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
                if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
+       if (link_caps.speed_capabilities &
+           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+               if_link->supported_caps |= QED_LM_20000baseKR2_Full_BIT;
        if (link_caps.speed_capabilities &
            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
                if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
@@ -1559,6 +1568,8 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
                if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
                if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
+       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
+               if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
                if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
        if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
index 19652cd27ca78afd8de31214b59f5f7f310a8f1c..7ff50b4488f61ab66d3b8926766c187e85057493 100644 (file)
@@ -420,6 +420,7 @@ static const struct qede_link_mode_mapping qed_lm_map[] = {
        {QED_LM_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Half_BIT},
        {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
        {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+       {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT},
        {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
        {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
        {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
@@ -535,6 +536,14 @@ static int qede_set_link_ksettings(struct net_device *dev,
                        }
                        params.adv_speeds = QED_LM_10000baseKR_Full_BIT;
                        break;
+               case SPEED_20000:
+                       if (!(current_link.supported_caps &
+                             QED_LM_20000baseKR2_Full_BIT)) {
+                               DP_INFO(edev, "20G speed not supported\n");
+                               return -EINVAL;
+                       }
+                       params.adv_speeds = QED_LM_20000baseKR2_Full_BIT;
+                       break;
                case SPEED_25000:
                        if (!(current_link.supported_caps &
                              QED_LM_25000baseKR_Full_BIT)) {
index 8cd34645e892623b71d5ee446dfb6a4423594e93..dee3c9c744f7526bbcb8ffa061ab22e17a9c13a4 100644 (file)
@@ -670,10 +670,11 @@ enum qed_link_mode_bits {
        QED_LM_1000baseT_Half_BIT = BIT(4),
        QED_LM_1000baseT_Full_BIT = BIT(5),
        QED_LM_10000baseKR_Full_BIT = BIT(6),
-       QED_LM_25000baseKR_Full_BIT = BIT(7),
-       QED_LM_40000baseLR4_Full_BIT = BIT(8),
-       QED_LM_50000baseKR2_Full_BIT = BIT(9),
-       QED_LM_100000baseKR4_Full_BIT = BIT(10),
+       QED_LM_20000baseKR2_Full_BIT = BIT(7),
+       QED_LM_25000baseKR_Full_BIT = BIT(8),
+       QED_LM_40000baseLR4_Full_BIT = BIT(9),
+       QED_LM_50000baseKR2_Full_BIT = BIT(10),
+       QED_LM_100000baseKR4_Full_BIT = BIT(11),
        QED_LM_COUNT = 11
 };
 
index 87e29710373f475816adc7acce7d46accedbc872..119d092c6b1324111ee81bb7ef0ab4b0fff7749d 100644 (file)
@@ -1082,11 +1082,6 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
 }
 #define dev_kfree_skb(a)       consume_skb(a)
 
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-                           int getfrag(void *from, char *to, int offset,
-                                       int len, int odd, struct sk_buff *skb),
-                           void *from, int length);
-
 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
                         int offset, size_t size);
 
index b2c807f67aba5847fa0c9f07adabbff7cf1afd22..0e937d3d85b556e8738717a6ddec1bd5ecde7b6f 100644 (file)
@@ -3381,64 +3381,6 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
 }
 EXPORT_SYMBOL(skb_find_text);
 
-/**
- * skb_append_datato_frags - append the user data to a skb
- * @sk: sock  structure
- * @skb: skb structure to be appended with user data.
- * @getfrag: call back function to be used for getting the user data
- * @from: pointer to user message iov
- * @length: length of the iov message
- *
- * Description: This procedure append the user data in the fragment part
- * of the skb if any page alloc fails user this procedure returns  -ENOMEM
- */
-int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-                       int (*getfrag)(void *from, char *to, int offset,
-                                       int len, int odd, struct sk_buff *skb),
-                       void *from, int length)
-{
-       int frg_cnt = skb_shinfo(skb)->nr_frags;
-       int copy;
-       int offset = 0;
-       int ret;
-       struct page_frag *pfrag = &current->task_frag;
-
-       do {
-               /* Return error if we don't have space for new frag */
-               if (frg_cnt >= MAX_SKB_FRAGS)
-                       return -EMSGSIZE;
-
-               if (!sk_page_frag_refill(sk, pfrag))
-                       return -ENOMEM;
-
-               /* copy the user data to page */
-               copy = min_t(int, length, pfrag->size - pfrag->offset);
-
-               ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
-                             offset, copy, 0, skb);
-               if (ret < 0)
-                       return -EFAULT;
-
-               /* copy was successful so update the size parameters */
-               skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
-                                  copy);
-               frg_cnt++;
-               pfrag->offset += copy;
-               get_page(pfrag->page);
-
-               skb->truesize += copy;
-               refcount_add(copy, &sk->sk_wmem_alloc);
-               skb->len += copy;
-               skb->data_len += copy;
-               offset += copy;
-               length -= copy;
-
-       } while (length > 0);
-
-       return 0;
-}
-EXPORT_SYMBOL(skb_append_datato_frags);
-
 int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
                         int offset, size_t size)
 {