M: Haavard Skinnemoen <hskinnemoen@gmail.com>
M: Hans-Christian Egtvedt <egtvedt@samfundet.no>
W: http://www.atmel.com/products/AVR32/
- W: http://avr32linux.org/
+ W: http://mirror.egtvedt.no/avr32linux.org/
W: http://avrfreaks.net/
S: Maintained
F: arch/avr32/
F: include/linux/netfilter_bridge/
F: net/bridge/
-ETHERTEAM 16I DRIVER
-M: Mika Kuoppala <miku@iki.fi>
-S: Maintained
-F: drivers/net/ethernet/fujitsu/eth16i.c
-
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.cz>
L: linux-ext4@vger.kernel.org
F: include/uapi/linux/nfs*
F: include/uapi/linux/sunrpc/
-NI5010 NETWORK DRIVER
-M: Jan-Pascal van Best <janpascal@vanbest.org>
-M: Andreas Mohr <andi@lisas.de>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/racal/ni5010.*
-
NILFS2 FILESYSTEM
M: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
L: linux-nilfs@vger.kernel.org
F: sound/
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
- M: Liam Girdwood <lrg@ti.com>
+ M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@opensource.wolfsonmicro.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
+struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit);
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
/* driver_chipcommon.c */
#ifdef CONFIG_BCMA_DRIVER_MIPS
void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_pflash_dev;
#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_pmu.c */
#ifdef CONFIG_BCMA_DRIVER_GPIO
/* driver_gpio.c */
int bcma_gpio_init(struct bcma_drv_cc *cc);
+ int bcma_gpio_unregister(struct bcma_drv_cc *cc);
#else
static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
{
return -ENOTSUPP;
}
+ static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+ {
+ return 0;
+ }
#endif /* CONFIG_BCMA_DRIVER_GPIO */
#endif
bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
}
+static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
+
+ if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+ return bcma_core_irq(cc->core);
+ else
+ return -EINVAL;
+}
+
int bcma_gpio_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
+ chip->to_irq = bcma_gpio_to_irq;
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
return gpiochip_add(chip);
}
+
+ int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+ {
+ return gpiochip_remove(&cc->gpio);
+ }
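The new to_irq hook lets GPIOs on SoC-hosted bcma buses be used as interrupt sources through the generic GPIO API; on other host types bcma_gpio_to_irq() returns -EINVAL. A minimal consumer sketch, assuming a placeholder GPIO number and handler that are not part of this patch:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_hook_gpio_irq(unsigned int gpio)
{
	int irq = gpio_to_irq(gpio);	/* ends up in bcma_gpio_to_irq() */

	if (irq < 0)
		return irq;		/* -EINVAL on non-SoC hosts */
	return request_irq(irq, example_isr, IRQF_TRIGGER_RISING,
			   "example-gpio", NULL);
}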
}
EXPORT_SYMBOL_GPL(bcma_find_core);
-static struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
- u8 unit)
+struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit)
{
struct bcma_device *core;
dev_id++;
}
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+ if (bus->drv_cc.pflash.present) {
+ err = platform_device_register(&bcma_pflash_dev);
+ if (err)
+ bcma_err(bus, "Error registering parallel flash\n");
+ }
+#endif
+
#ifdef CONFIG_BCMA_SFLASH
if (bus->drv_cc.sflash.present) {
err = platform_device_register(&bcma_sflash_dev);
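Both the parallel-flash hunk above and the existing serial-flash path register a statically defined platform_device once chipcommon detection reports the flash as present. Roughly what such a static device looks like, as a sketch only (the real bcma_pflash_dev definition lives in the MIPS driver and may differ, including the binding name):

#include <linux/platform_device.h>
#include <linux/ioport.h>

static struct resource example_pflash_resource = {
	.name	= "pflash",
	.flags	= IORESOURCE_MEM,
	/* .start/.end are filled in from the detected flash window */
};

struct platform_device example_pflash_dev = {
	.name		= "physmap-flash",	/* assumed MTD physmap binding */
	.resource	= &example_pflash_resource,
	.num_resources	= 1,
};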
void bcma_bus_unregister(struct bcma_bus *bus)
{
struct bcma_device *cores[3];
+ int err;
+
+ err = bcma_gpio_unregister(&bus->drv_cc);
+ if (err == -EBUSY)
+ bcma_err(bus, "Some GPIOs are still in use.\n");
+ else if (err)
+ bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
}
}
- if ((dev_cap->flags &
+ if ((dev->caps.flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
- priv->fs_hash_mode = MLX4_FS_L2_HASH;
-
- switch (priv->fs_hash_mode) {
- case MLX4_FS_L2_HASH:
- init_hca.fs_hash_enable_bits = 0;
- break;
-
- case MLX4_FS_L2_L3_L4_HASH:
- /* Enable flow steering with
- * udp unicast and tcp unicast
- */
- init_hca.fs_hash_enable_bits =
- MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
- break;
- }
-
profile = default_profile;
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
- INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn =
- dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
- (port - 1) * (1 << log_num_mac);
+ info->base_qpn = mlx4_get_base_qpn(dev, port);
}
sprintf(info->dev_name, "mlx4_port%d", port);
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(&pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
err = -ENOMEM;
goto err_release_regions;
}
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
else {
dev->num_slaves = 0;
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init slave mfunc"
" interface, aborting.\n");
goto err_cmd;
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init master mfunc"
"interface, aborting.\n");
goto err_close;
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
+ err = -ENOSYS;
mlx4_err(dev, "INTx is not supported in multi-function mode."
" aborting.\n");
goto err_free_eq;
{
struct usbnet *dev = netdev_priv(net);
- strncpy(info->driver, dev->driver_name, sizeof(info->driver));
- strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strncpy(info->fw_version, dev->driver_info->description,
+ strlcpy(info->driver, dev->driver_name, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, dev->driver_info->description,
sizeof(info->fw_version));
usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
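strlcpy() always NUL-terminates the destination, truncating the source if needed, whereas strncpy() leaves the buffer unterminated whenever the source is at least as long as the buffer, which is what made the ethtool strings above unsafe. A tiny illustration, not taken from the driver:

char buf[8];

strncpy(buf, "usbnet-driver", sizeof(buf));	/* 8 bytes copied, no '\0' */
strlcpy(buf, "usbnet-driver", sizeof(buf));	/* buf = "usbnet-", '\0' added */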
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&wwan_info,
+ },
/* Infineon(now Intel) HSPA Modem platform */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
#include "debug.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
+ #define BRCMS_FLUSH_TIMEOUT 500 /* msec */
/* Flags we support */
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
return -EOPNOTSUPP;
}
+ spin_lock_bh(&wl->lock);
+ memcpy(wl->pub->cur_etheraddr, vif->addr, sizeof(vif->addr));
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
+ spin_unlock_bh(&wl->lock);
return 0;
}
if (changed & BSS_CHANGED_ARP_FILTER) {
/* Hardware ARP filter address list or state changed */
- brcms_err(core, "%s: arp filtering: enabled %s, count %d"
- " (implement)\n", __func__, info->arp_filter_enabled ?
- "true" : "false", info->arp_addr_cnt);
+ brcms_err(core, "%s: arp filtering: %d addresses"
+ " (implement)\n", __func__, info->arp_addr_cnt);
}
if (changed & BSS_CHANGED_QOS) {
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
- case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
spin_lock_bh(&wl->lock);
brcms_c_ampdu_flush(wl->wlc, sta, tid);
spin_unlock_bh(&wl->lock);
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
}
+ static bool brcms_tx_flush_completed(struct brcms_info *wl)
+ {
+ bool result;
+
+ spin_lock_bh(&wl->lock);
+ result = brcms_c_tx_flush_completed(wl->wlc);
+ spin_unlock_bh(&wl->lock);
+ return result;
+ }
+
static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
{
struct brcms_info *wl = hw->priv;
+ int ret;
no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
- /* wait for packet queue and dma fifos to run empty */
- spin_lock_bh(&wl->lock);
- brcms_c_wait_for_tx_completion(wl->wlc, drop);
- spin_unlock_bh(&wl->lock);
+ ret = wait_event_timeout(wl->tx_flush_wq,
+ brcms_tx_flush_completed(wl),
+ msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+ brcms_dbg_mac80211(wl->wlc->hw->d11core,
+ "ret=%d\n", jiffies_to_msecs(ret));
}
static const struct ieee80211_ops brcms_ops = {
done:
spin_unlock_bh(&wl->lock);
+ wake_up(&wl->tx_flush_wq);
}
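With this rework the mac80211 flush callback no longer polls with brcms_msleep(); it sleeps on wl->tx_flush_wq until brcms_tx_flush_completed() reports the queues empty or the 500 ms BRCMS_FLUSH_TIMEOUT expires, and the transmit-done path above wakes the queue. A generic sketch of that waitqueue pattern, with illustrative names:

#include <linux/wait.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_done;		/* set by the completion path */

static void example_wait_for_flush(void)
{
	long ret = wait_event_timeout(example_wq, example_done,
				      msecs_to_jiffies(500));

	/* ret is 0 on timeout, otherwise the remaining jiffies */
	(void)ret;
}

static void example_flush_complete(void)
{
	example_done = true;
	wake_up(&example_wq);	/* re-checks the condition for any waiter */
}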
/*
atomic_set(&wl->callbacks, 0);
+ init_waitqueue_head(&wl->tx_flush_wq);
+
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
spin_lock_bh(&wl->lock);
return blocked;
}
-
- /*
- * precondition: perimeter lock has been acquired
- */
- void brcms_msleep(struct brcms_info *wl, uint ms)
- {
- spin_unlock_bh(&wl->lock);
- msleep(ms);
- spin_lock_bh(&wl->lock);
- }
static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+ u8 *ethaddr = wlc_hw->wlc->pub->cur_etheraddr;
if (mute_tx) {
/* suspend tx fifos */
brcms_b_tx_fifo_suspend(wlc_hw, TX_AC_VI_FIFO);
/* zero the address match register so we do not send ACKs */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- null_ether_addr);
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, null_ether_addr);
} else {
/* resume tx fifos */
brcms_b_tx_fifo_resume(wlc_hw, TX_DATA_FIFO);
brcms_b_tx_fifo_resume(wlc_hw, TX_AC_VI_FIFO);
/* Restore address */
- brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET,
- wlc_hw->etheraddr);
+ brcms_b_set_addrmatch(wlc_hw, RCM_MAC_OFFSET, ethaddr);
}
wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
return wlc->band->bandunit;
}
- void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+ bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
{
- int timeout = 20;
int i;
/* Kick DMA to send any pending AMPDU */
for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
if (wlc->hw->di[i])
- dma_txflush(wlc->hw->di[i]);
+ dma_kick_tx(wlc->hw->di[i]);
- /* wait for queue and DMA fifos to run dry */
- while (brcms_txpktpendtot(wlc) > 0) {
- brcms_msleep(wlc->wl, 1);
-
- if (--timeout == 0)
- break;
- }
-
- WARN_ON_ONCE(timeout == 0);
+ return !brcms_txpktpendtot(wlc);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
- bool morepending;
+ bool morepending = false;
skb_queue_head_init(&recv_frames);
.ndo_stop = xenvif_close,
.ndo_change_mtu = xenvif_change_mtu,
.ndo_fix_features = xenvif_fix_features,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
};
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
return err;
}
- void xenvif_disconnect(struct xenvif *vif)
+ void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
- if (netif_carrier_ok(dev)) {
- rtnl_lock();
- netif_carrier_off(dev); /* discard queued packets */
- if (netif_running(dev))
- xenvif_down(vif);
- rtnl_unlock();
- xenvif_put(vif);
- }
+
+ rtnl_lock();
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ rtnl_unlock();
+ xenvif_put(vif);
+ }
+
+ void xenvif_disconnect(struct xenvif *vif)
+ {
+ if (netif_carrier_ok(vif->dev))
+ xenvif_carrier_off(vif);
atomic_dec(&vif->refcnt);
wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
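Splitting xenvif_carrier_off() out lets callers take the interface down without tearing it down, while xenvif_disconnect() still ends with the usual refcount-and-wait teardown: drop a reference, then sleep until every outstanding reference is gone. A generic sketch of that pattern with illustrative names, assuming the put path issues the wake_up when the count reaches zero:

#include <linux/atomic.h>
#include <linux/wait.h>

struct example_vif {
	atomic_t refcnt;
	wait_queue_head_t waiting_to_free;
};

static void example_put(struct example_vif *vif)
{
	if (atomic_dec_and_test(&vif->refcnt))
		wake_up(&vif->waiting_to_free);
}

static void example_disconnect(struct example_vif *vif)
{
	example_put(vif);	/* drop the caller's reference */
	wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
	/* no references remain; resources can be freed */
}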
ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
}
+static int ssb_gpio_chipco_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ return ssb_mips_irq(bus->chipco.dev) + 2;
+ else
+ return -EINVAL;
+}
+
static int ssb_gpio_chipco_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
chip->set = ssb_gpio_chipco_set_value;
chip->direction_input = ssb_gpio_chipco_direction_input;
chip->direction_output = ssb_gpio_chipco_direction_output;
+ chip->to_irq = ssb_gpio_chipco_to_irq;
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
return 0;
}
+static int ssb_gpio_extif_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+
+ if (bus->bustype == SSB_BUSTYPE_SSB)
+ return ssb_mips_irq(bus->extif.dev) + 2;
+ else
+ return -EINVAL;
+}
+
static int ssb_gpio_extif_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
chip->set = ssb_gpio_extif_set_value;
chip->direction_input = ssb_gpio_extif_direction_input;
chip->direction_output = ssb_gpio_extif_direction_output;
+ chip->to_irq = ssb_gpio_extif_to_irq;
chip->ngpio = 5;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
return -1;
}
+
+ int ssb_gpio_unregister(struct ssb_bus *bus)
+ {
+ if (ssb_chipco_available(&bus->chipco) ||
+ ssb_extif_available(&bus->extif)) {
+ return gpiochip_remove(&bus->gpio);
+ } else {
+ SSB_WARN_ON(1);
+ }
+
+ return -1;
+ }
void ssb_bus_unregister(struct ssb_bus *bus)
{
+ int err;
+
+ err = ssb_gpio_unregister(bus);
+ if (err == -EBUSY)
+ ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
+ else if (err)
+ ssb_dprintk(KERN_ERR PFX
+ "Can not unregister GPIO driver: %i\n", err);
+
ssb_buses_lock();
ssb_devices_unregister(bus);
list_del(&bus->list);
dev_idx++;
}
+#ifdef CONFIG_SSB_DRIVER_MIPS
+ if (bus->mipscore.pflash.present) {
+ err = platform_device_register(&ssb_pflash_dev);
+ if (err)
+ pr_err("Error registering parallel flash\n");
+ }
+#endif
+
return 0;
error:
/* Unwind the already registered devices. */
u32 ticks);
extern u32 ssb_chipco_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
+/* driver_chipcommon_sflash.c */
+#ifdef CONFIG_SSB_SFLASH
+int ssb_sflash_init(struct ssb_chipcommon *cc);
+#else
+static inline int ssb_sflash_init(struct ssb_chipcommon *cc)
+{
+ pr_err("Serial flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_SSB_SFLASH */
+
+#ifdef CONFIG_SSB_DRIVER_MIPS
+extern struct platform_device ssb_pflash_dev;
+#endif
+
#ifdef CONFIG_SSB_DRIVER_EXTIF
extern u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks);
extern u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
#ifdef CONFIG_SSB_DRIVER_GPIO
extern int ssb_gpio_init(struct ssb_bus *bus);
+ extern int ssb_gpio_unregister(struct ssb_bus *bus);
#else /* CONFIG_SSB_DRIVER_GPIO */
static inline int ssb_gpio_init(struct ssb_bus *bus)
{
return -ENOTSUPP;
}
+ static inline int ssb_gpio_unregister(struct ssb_bus *bus)
+ {
+ return 0;
+ }
#endif /* CONFIG_SSB_DRIVER_GPIO */
#endif /* LINUX_SSB_PRIVATE_H_ */
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
EXPORT_SYMBOL(sysctl_tcp_reordering);
-int sysctl_tcp_ecn __read_mostly = 2;
-EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
int sysctl_tcp_thin_dupack __read_mostly;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
-int sysctl_tcp_abc __read_mostly;
int sysctl_tcp_early_retrans __read_mostly = 2;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
*/
if (!skb_shinfo(prev)->gso_size) {
skb_shinfo(prev)->gso_size = mss;
- skb_shinfo(prev)->gso_type = sk->sk_gso_type;
+ skb_shinfo(prev)->gso_type |= sk->sk_gso_type;
}
/* CHECKME: To clear or not to clear? Mimics normal skb currently */
if (skb_shinfo(skb)->gso_segs <= 1) {
skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_type &= SKB_GSO_SHARED_FRAG;
}
/* Difference in this won't matter, both ACKed by the same cumul. ACK */
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
tp->frto_counter = 0;
- tp->bytes_acked = 0;
tp->reordering = min_t(unsigned int, tp->reordering,
sysctl_tcp_reordering);
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
- tp->bytes_acked = 0;
tcp_clear_retrans_partial(tp);
if (tcp_is_reno(tp))
struct tcp_sock *tp = tcp_sk(sk);
tp->high_seq = tp->snd_nxt;
- tp->bytes_acked = 0;
tp->snd_cwnd_cnt = 0;
tp->prior_cwnd = tp->snd_cwnd;
tp->prr_delivered = 0;
struct tcp_sock *tp = tcp_sk(sk);
tp->prior_ssthresh = 0;
- tp->bytes_acked = 0;
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
tp->undo_marker = 0;
tcp_init_cwnd_reduction(sk, set_ssthresh);
{
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
tp->snd_cwnd_cnt = 0;
- tp->bytes_acked = 0;
TCP_ECN_queue_cwr(tp);
tcp_moderate_cwnd(tp);
}
((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
tp->undo_marker = 0;
- if (!before(tp->snd_una, tp->frto_highmark) ||
- !tcp_packets_in_flight(tp)) {
+ if (!before(tp->snd_una, tp->frto_highmark)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
return true;
}
}
} else {
if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+ if (!tcp_packets_in_flight(tp)) {
+ tcp_enter_frto_loss(sk, 2, flag);
+ return true;
+ }
+
/* Prevent sending of new data. */
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp));
if (after(ack, prior_snd_una))
flag |= FLAG_SND_UNA_ADVANCED;
- if (sysctl_tcp_abc) {
- if (icsk->icsk_ca_state < TCP_CA_CWR)
- tp->bytes_acked += ack - prior_snd_una;
- else if (icsk->icsk_ca_state == TCP_CA_Loss)
- /* we assume just one segment left network */
- tp->bytes_acked += min(ack - prior_snd_una,
- tp->mss_cache);
- }
-
prior_fackets = tp->fackets_out;
prior_in_flight = tcp_packets_in_flight(tp);
* Push down and install the IP header.
*/
ipv6h = ipv6_hdr(skb);
- *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
- dsfield = INET_ECN_encapsulate(0, dsfield);
- ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
+ ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
ipv6h->hop_limit = tunnel->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
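ip6_flow_hdr() replaces the open-coded store of the version/traffic-class/flow-label word, so the ECN bits no longer need a separate ipv6_change_dsfield() pass. From memory the helper is roughly the following; see include/net/ipv6.h for the authoritative definition:

static inline void example_ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
					__be32 flowlabel)
{
	/* version 6 in the top nibble, then traffic class, then flow label */
	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}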
int ret;
if (!ip6_tnl_xmit_ctl(t))
- return -1;
+ goto tx_err;
switch (skb->protocol) {
case htons(ETH_P_IP):
struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
__be16 *p = (__be16 *)(ipv6h+1);
- *(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
+ ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
static atomic_t l2tp_tunnel_count;
static atomic_t l2tp_session_count;
+static struct workqueue_struct *l2tp_wq;
/* per-net private data for this module */
static unsigned int l2tp_net_id;
return net_generic(net, l2tp_net_id);
}
-
/* Tunnel reference counts. Incremented per session that is added to
* the tunnel.
*/
struct udphdr *uh;
struct inet_sock *inet;
__wsum csum;
- int old_headroom;
- int new_headroom;
int headroom;
int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
int udp_len;
*/
headroom = NET_SKB_PAD + sizeof(struct iphdr) +
uhlen + hdr_len;
- old_headroom = skb_headroom(skb);
if (skb_cow_head(skb, headroom)) {
kfree_skb(skb);
return NET_XMIT_DROP;
}
- new_headroom = skb_headroom(skb);
skb_orphan(skb);
- skb->truesize += new_headroom - old_headroom;
-
/* Setup L2TP header */
session->build_header(session, __skb_push(skb, hdr_len));
static void l2tp_tunnel_destruct(struct sock *sk)
{
struct l2tp_tunnel *tunnel;
+ struct l2tp_net *pn;
tunnel = sk->sk_user_data;
if (tunnel == NULL)
l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
- /* Close all sessions */
- l2tp_tunnel_closeall(tunnel);
+ /* Disable udp encapsulation */
switch (tunnel->encap) {
case L2TP_ENCAPTYPE_UDP:
/* No longer an encapsulation socket. See net/ipv4/udp.c */
}
/* Remove hooks into tunnel socket */
- tunnel->sock = NULL;
sk->sk_destruct = tunnel->old_sk_destruct;
sk->sk_user_data = NULL;
+ tunnel->sock = NULL;
- /* Call the original destructor */
- if (sk->sk_destruct)
- (*sk->sk_destruct)(sk);
+ /* Remove the tunnel struct from the tunnel list */
+ pn = l2tp_pernet(tunnel->l2tp_net);
+ spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+ list_del_rcu(&tunnel->list);
+ spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+ atomic_dec(&l2tp_tunnel_count);
- /* We're finished with the socket */
+ l2tp_tunnel_closeall(tunnel);
l2tp_tunnel_dec_refcount(tunnel);
+ /* Call the original destructor */
+ if (sk->sk_destruct)
+ (*sk->sk_destruct)(sk);
end:
return;
}
*/
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
- struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
BUG_ON(atomic_read(&tunnel->ref_count) != 0);
BUG_ON(tunnel->sock != NULL);
-
l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
-
- /* Remove from tunnel list */
- spin_lock_bh(&pn->l2tp_tunnel_list_lock);
- list_del_rcu(&tunnel->list);
kfree_rcu(tunnel, rcu);
- spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+}
- atomic_dec(&l2tp_tunnel_count);
+/* Workqueue tunnel deletion function */
+static void l2tp_tunnel_del_work(struct work_struct *work)
+{
+ struct l2tp_tunnel *tunnel = NULL;
+ struct socket *sock = NULL;
+ struct sock *sk = NULL;
+
+ tunnel = container_of(work, struct l2tp_tunnel, del_work);
+ sk = l2tp_tunnel_sock_lookup(tunnel);
+ if (!sk)
+ return;
+
+ sock = sk->sk_socket;
+ BUG_ON(!sock);
+
+ /* If the tunnel socket was created directly by the kernel, use the
+ * sk_* API to release the socket now. Otherwise go through the
+ * inet_* layer to shut the socket down, and let userspace close it.
+ * In either case the tunnel resources are freed in the socket
+ * destructor when the tunnel socket goes away.
+ */
+ if (sock->file == NULL) {
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sk_release_kernel(sk);
+ } else {
+ inet_shutdown(sock, 2);
+ }
+
+ l2tp_tunnel_sock_put(sk);
}
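Tunnel teardown is now deferred to the dedicated l2tp_wq workqueue (queued from l2tp_tunnel_delete() further down), so the socket shutdown always runs in process context. The general shape of that deferred-delete pattern, with illustrative names:

#include <linux/workqueue.h>

struct example_tunnel {
	struct work_struct del_work;
};

static struct workqueue_struct *example_wq;

static void example_del_work(struct work_struct *work)
{
	struct example_tunnel *tunnel =
		container_of(work, struct example_tunnel, del_work);

	/* shut down and release resources in process context */
	(void)tunnel;
}

static void example_tunnel_init(struct example_tunnel *tunnel)
{
	INIT_WORK(&tunnel->del_work, example_del_work);
}

static bool example_tunnel_delete(struct example_tunnel *tunnel)
{
	/* queue_work() returns false if the work was already queued */
	return queue_work(example_wq, &tunnel->del_work);
}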
/* Create a socket for the tunnel, if one isn't set up by
* userspace. This is used for static tunnels where there is no
* managing L2TP daemon.
+ *
+ * Since we don't want these sockets to keep a namespace alive by
+ * themselves, we drop the socket's namespace refcount after creation.
+ * These sockets are freed when the namespace exits using the pernet
+ * exit hook.
*/
-static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
+static int l2tp_tunnel_sock_create(struct net *net,
+ u32 tunnel_id,
+ u32 peer_tunnel_id,
+ struct l2tp_tunnel_cfg *cfg,
+ struct socket **sockp)
{
int err = -EINVAL;
- struct sockaddr_in udp_addr;
+ struct socket *sock = NULL;
+ struct sockaddr_in udp_addr = {0};
+ struct sockaddr_l2tpip ip_addr = {0};
#if IS_ENABLED(CONFIG_IPV6)
- struct sockaddr_in6 udp6_addr;
- struct sockaddr_l2tpip6 ip6_addr;
+ struct sockaddr_in6 udp6_addr = {0};
+ struct sockaddr_l2tpip6 ip6_addr = {0};
#endif
- struct sockaddr_l2tpip ip_addr;
- struct socket *sock = NULL;
switch (cfg->encap) {
case L2TP_ENCAPTYPE_UDP:
#if IS_ENABLED(CONFIG_IPV6)
if (cfg->local_ip6 && cfg->peer_ip6) {
- err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp);
+ err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
if (err < 0)
goto out;
- sock = *sockp;
+ sk_change_net(sock->sk, net);
- memset(&udp6_addr, 0, sizeof(udp6_addr));
udp6_addr.sin6_family = AF_INET6;
memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
sizeof(udp6_addr.sin6_addr));
} else
#endif
{
- err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
+ err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
if (err < 0)
goto out;
- sock = *sockp;
+ sk_change_net(sock->sk, net);
- memset(&udp_addr, 0, sizeof(udp_addr));
udp_addr.sin_family = AF_INET;
udp_addr.sin_addr = cfg->local_ip;
udp_addr.sin_port = htons(cfg->local_udp_port);
case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
if (cfg->local_ip6 && cfg->peer_ip6) {
- err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP,
- sockp);
+ err = sock_create_kern(AF_INET6, SOCK_DGRAM,
+ IPPROTO_L2TP, &sock);
if (err < 0)
goto out;
- sock = *sockp;
+ sk_change_net(sock->sk, net);
- memset(&ip6_addr, 0, sizeof(ip6_addr));
ip6_addr.l2tp_family = AF_INET6;
memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
sizeof(ip6_addr.l2tp_addr));
} else
#endif
{
- err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP,
- sockp);
+ err = sock_create_kern(AF_INET, SOCK_DGRAM,
+ IPPROTO_L2TP, &sock);
if (err < 0)
goto out;
- sock = *sockp;
+ sk_change_net(sock->sk, net);
- memset(&ip_addr, 0, sizeof(ip_addr));
ip_addr.l2tp_family = AF_INET;
ip_addr.l2tp_addr = cfg->local_ip;
ip_addr.l2tp_conn_id = tunnel_id;
}
out:
+ *sockp = sock;
if ((err < 0) && sock) {
- sock_release(sock);
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sk_release_kernel(sock->sk);
*sockp = NULL;
}
* kernel socket.
*/
if (fd < 0) {
- err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
+ err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
+ cfg, &sock);
if (err < 0)
goto err;
} else {
- err = -EBADF;
sock = sockfd_lookup(fd, &err);
if (!sock) {
- pr_err("tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
+ pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
tunnel_id, fd, err);
+ err = -EBADF;
+ goto err;
+ }
+
+ /* Reject namespace mismatches */
+ if (!net_eq(sock_net(sock->sk), net)) {
+ pr_err("tunl %u: netns mismatch\n", tunnel_id);
+ err = -EINVAL;
goto err;
}
}
sk->sk_allocation = GFP_ATOMIC;
+ /* Init delete workqueue struct */
+ INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
+
/* Add tunnel to our list */
INIT_LIST_HEAD(&tunnel->list);
atomic_inc(&l2tp_tunnel_count);
*/
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
- int err = -EBADF;
- struct socket *sock = NULL;
- struct sock *sk = NULL;
-
- sk = l2tp_tunnel_sock_lookup(tunnel);
- if (!sk)
- goto out;
-
- sock = sk->sk_socket;
- BUG_ON(!sock);
-
- /* Force the tunnel socket to close. This will eventually
- * cause the tunnel to be deleted via the normal socket close
- * mechanisms when userspace closes the tunnel socket.
- */
- err = inet_shutdown(sock, 2);
-
- /* If the tunnel's socket was created by the kernel,
- * close the socket here since the socket was not
- * created by userspace.
- */
- if (sock->file == NULL)
- err = inet_release(sock);
-
- l2tp_tunnel_sock_put(sk);
-out:
- return err;
+ return (false == queue_work(l2tp_wq, &tunnel->del_work));
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
return 0;
}
+static __net_exit void l2tp_exit_net(struct net *net)
+{
+ struct l2tp_net *pn = l2tp_pernet(net);
+ struct l2tp_tunnel *tunnel = NULL;
+
+ rcu_read_lock_bh();
+ list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+ (void)l2tp_tunnel_delete(tunnel);
+ }
+ rcu_read_unlock_bh();
+}
+
static struct pernet_operations l2tp_net_ops = {
.init = l2tp_init_net,
+ .exit = l2tp_exit_net,
.id = &l2tp_net_id,
.size = sizeof(struct l2tp_net),
};
if (rc)
goto out;
+ l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
+ if (!l2tp_wq) {
+ pr_err("alloc_workqueue failed\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
out:
static void __exit l2tp_exit(void)
{
unregister_pernet_device(&l2tp_net_ops);
+ if (l2tp_wq) {
+ destroy_workqueue(l2tp_wq);
+ l2tp_wq = NULL;
+ }
}
module_init(l2tp_init);
return;
if (atomic_dec_and_test(&key->refcnt)) {
- kfree(key);
+ kzfree(key);
SCTP_DBG_OBJCNT_DEC(keys);
}
}
struct sctp_auth_bytes *new;
__u32 len;
__u32 offset = 0;
+ __u16 random_len, hmacs_len, chunks_len = 0;
- len = ntohs(random->param_hdr.length) + ntohs(hmacs->param_hdr.length);
- if (chunks)
- len += ntohs(chunks->param_hdr.length);
+ random_len = ntohs(random->param_hdr.length);
+ hmacs_len = ntohs(hmacs->param_hdr.length);
+ if (chunks)
+ chunks_len = ntohs(chunks->param_hdr.length);
- new = kmalloc(sizeof(struct sctp_auth_bytes) + len, gfp);
+ len = random_len + hmacs_len + chunks_len;
+
+ new = sctp_auth_create_key(len, gfp);
if (!new)
return NULL;
- new->len = len;
-
- memcpy(new->data, random, ntohs(random->param_hdr.length));
- offset += ntohs(random->param_hdr.length);
+ memcpy(new->data, random, random_len);
+ offset += random_len;
if (chunks) {
- memcpy(new->data + offset, chunks,
- ntohs(chunks->param_hdr.length));
- offset += ntohs(chunks->param_hdr.length);
+ memcpy(new->data + offset, chunks, chunks_len);
+ offset += chunks_len;
}
- memcpy(new->data + offset, hmacs, ntohs(hmacs->param_hdr.length));
+ memcpy(new->data + offset, hmacs, hmacs_len);
return new;
}
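The key-vector buffer is now obtained from sctp_auth_create_key() and dropped with sctp_auth_key_put(), so it carries the same reference count as every other auth key and, together with the earlier kfree() to kzfree() change, is wiped before it is freed. A generic sketch of that refcounted-key shape (field and helper names are illustrative, not the sctp ones):

struct example_key {
	atomic_t refcnt;
	__u32 len;
	__u8 data[];
};

static struct example_key *example_key_create(__u32 len, gfp_t gfp)
{
	struct example_key *key = kmalloc(sizeof(*key) + len, gfp);

	if (!key)
		return NULL;
	key->len = len;
	atomic_set(&key->refcnt, 1);
	return key;
}

static void example_key_put(struct example_key *key)
{
	if (key && atomic_dec_and_test(&key->refcnt))
		kzfree(key);	/* zero the key material before freeing */
}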
secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector,
gfp);
out:
- kfree(local_key_vector);
- kfree(peer_key_vector);
+ sctp_auth_key_put(local_key_vector);
+ sctp_auth_key_put(peer_key_vector);
return secret;
}