static void velocity_init_cam_filter(struct velocity_info *vptr)
{
struct mac_regs __iomem * regs = vptr->mac_regs;
- unsigned short vid;
/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
mac_set_cam_mask(regs, vptr->mCAMmask);
- /* Enable first VCAM */
+ /* Enable VCAMs */
if (vptr->vlgrp) {
- for (vid = 0; vid < VLAN_VID_MASK; vid++) {
- if (vlan_group_get_device(vptr->vlgrp, vid)) {
- /* If Tagging option is enabled and
- VLAN ID is not zero, then
- turn on MCFG_RTGOPT also */
- if (vid != 0)
- WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
+ unsigned int vid, i = 0;
+
+ if (!vlan_group_get_device(vptr->vlgrp, 0))
+ WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
- mac_set_vlan_cam(regs, 0, (u8 *) &vid);
+ for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
+ if (vlan_group_get_device(vptr->vlgrp, vid)) {
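+ /* program VCAM entry i with this vid and mark it valid in the mask */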
+ mac_set_vlan_cam(regs, i, (u8 *) &vid);
+ vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
+ if (++i >= VCAM_SIZE)
+ break;
}
}
- vptr->vCAMmask[0] |= 1;
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
- } else {
- u16 temp = 0;
- mac_set_vlan_cam(regs, 0, (u8 *) &temp);
- temp = 1;
- mac_set_vlan_cam_mask(regs, (u8 *) &temp);
}
}
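+/* called by the 8021q layer; stash the group for the RX path and CAM filter */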
+static void velocity_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct velocity_info *vptr = netdev_priv(dev);
+
+ vptr->vlgrp = grp;
+}
+
static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;
+ dev->vlan_rx_register = velocity_vlan_rx_register;
#ifdef VELOCITY_ZERO_COPY_SUPPORT
dev->features |= NETIF_F_SG;
#endif
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER;
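+ /* the MAC can strip VLAN tags on receive, so advertise NETIF_F_HW_VLAN_RX */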
+ dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_HW_VLAN_RX;
if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
dev->features |= NETIF_F_IP_CSUM;
* enough. This function returns a negative value if the received
* packet is too big or if memory is exhausted.
*/
-static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
- struct velocity_info *vptr)
+static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
+ struct velocity_info *vptr)
{
int ret = -1;
-
if (pkt_size < rx_copybreak) {
struct sk_buff *new_skb;
- new_skb = dev_alloc_skb(pkt_size + 2);
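+ /* netdev_alloc_skb() sets skb->dev for us */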
+ new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
if (new_skb) {
- new_skb->dev = vptr->dev;
new_skb->ip_summed = rx_skb[0]->ip_summed;
-
- if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
- skb_reserve(new_skb, 2);
-
- skb_copy_from_linear_data(rx_skb[0], new_skb->data,
- pkt_size);
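+ /* always leave 2 bytes of headroom so the IP header is 4-byte aligned */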
+ skb_reserve(new_skb, 2);
+ skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
*rx_skb = new_skb;
ret = 0;
}
static inline void velocity_iph_realign(struct velocity_info *vptr,
struct sk_buff *skb, int pkt_size)
{
- /* FIXME - memmove ? */
if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
- int i;
-
- for (i = pkt_size; i >= 0; i--)
- *(skb->data + i + 2) = *(skb->data + i);
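+ /* shift the frame up 2 bytes; the regions overlap, so use memmove */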
+ memmove(skb->data + 2, skb->data, pkt_size);
skb_reserve(skb, 2);
}
}
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, vptr->dev);
+ if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
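+ /* the MAC stripped the tag (RSR_DETAG); PQTAG holds it byte-swapped */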
+ vlan_hwaccel_rx(skb, vptr->vlgrp,
+ swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
+ } else
+ netif_rx(skb);
+
stats->rx_bytes += pkt_len;
- netif_rx(skb);
return 0;
}
struct rx_desc *rd = &(vptr->rd_ring[idx]);
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
- rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64);
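+ /* 64 bytes of slack for the 64-byte alignment fixup below */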
+ rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
if (rd_info->skb == NULL)
return -ENOMEM;
* 64byte alignment.
*/
skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
- rd_info->skb->dev = vptr->dev;
rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
/*