Diffstat (limited to 'net/bridge')
-rw-r--r-- | net/bridge/Kconfig        |   6
-rw-r--r-- | net/bridge/br_device.c    |  43
-rw-r--r-- | net/bridge/br_if.c        |   6
-rw-r--r-- | net/bridge/br_input.c     |   6
-rw-r--r-- | net/bridge/br_multicast.c | 621
-rw-r--r-- | net/bridge/br_notify.c    |   4
-rw-r--r-- | net/bridge/br_private.h   |  24
7 files changed, 640 insertions, 70 deletions
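Aside from the IGMP/MLD work, the patch also converts the bridge device's global tx/rx counters to per-CPU counters that are only summed when statistics are read. The following is a minimal kernel-style sketch of that pattern, assuming the struct and field names from the br_private.h hunk; br_count_tx and br_fold_stats are hypothetical helper names used purely to isolate the two halves of the pattern, the actual patch open-codes them in br_dev_xmit()/br_pass_frame_up() and in the new br_get_stats().

#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

/* Mirrors the struct added to br_private.h. */
struct br_cpu_netstats {
	unsigned long	rx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_bytes;
};

/*
 * Hot path: touch only this CPU's counters, so concurrent transmits do not
 * contend on a single shared cacheline in dev->stats.
 */
static void br_count_tx(struct br_cpu_netstats __percpu *stats,
			unsigned int len)
{
	struct br_cpu_netstats *s = this_cpu_ptr(stats);

	s->tx_packets++;
	s->tx_bytes += len;
}

/*
 * Slow path (.ndo_get_stats): fold every CPU's counters into the four
 * fields of dev->stats that the bridge actually maintains.
 */
static void br_fold_stats(struct net_device_stats *dst,
			  const struct br_cpu_netstats __percpu *stats)
{
	struct br_cpu_netstats sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		const struct br_cpu_netstats *s = per_cpu_ptr(stats, cpu);

		sum.tx_packets += s->tx_packets;
		sum.tx_bytes   += s->tx_bytes;
		sum.rx_packets += s->rx_packets;
		sum.rx_bytes   += s->rx_bytes;
	}

	dst->tx_packets = sum.tx_packets;
	dst->tx_bytes   = sum.tx_bytes;
	dst->rx_packets = sum.rx_packets;
	dst->rx_bytes   = sum.rx_bytes;
}

The per-CPU storage itself is allocated with alloc_percpu() when the bridge device is created (br_if.c hunk) and released with free_percpu() from the new br_dev_free() destructor, which is why dev->destructor changes from free_netdev to br_dev_free in br_device.c.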
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig index d115d5cea5b6..9190ae462cb4 100644 --- a/net/bridge/Kconfig +++ b/net/bridge/Kconfig @@ -33,14 +33,14 @@ config BRIDGE If unsure, say N. config BRIDGE_IGMP_SNOOPING - bool "IGMP snooping" + bool "IGMP/MLD snooping" depends on BRIDGE depends on INET default y ---help--- If you say Y here, then the Ethernet bridge will be able selectively - forward multicast traffic based on IGMP traffic received from each - port. + forward multicast traffic based on IGMP/MLD traffic received from + each port. Say N to exclude this support and reduce the binary size. diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 90a9024e5c1e..5b8a6e73b02f 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -26,11 +26,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) const unsigned char *dest = skb->data; struct net_bridge_fdb_entry *dst; struct net_bridge_mdb_entry *mdst; + struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); - BR_INPUT_SKB_CB(skb)->brdev = dev; + brstats->tx_packets++; + brstats->tx_bytes += skb->len; - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + BR_INPUT_SKB_CB(skb)->brdev = dev; skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); @@ -81,6 +82,31 @@ static int br_dev_stop(struct net_device *dev) return 0; } +static struct net_device_stats *br_get_stats(struct net_device *dev) +{ + struct net_bridge *br = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct br_cpu_netstats sum = { 0 }; + unsigned int cpu; + + for_each_possible_cpu(cpu) { + const struct br_cpu_netstats *bstats + = per_cpu_ptr(br->stats, cpu); + + sum.tx_bytes += bstats->tx_bytes; + sum.tx_packets += bstats->tx_packets; + sum.rx_bytes += bstats->rx_bytes; + sum.rx_packets += bstats->rx_packets; + } + + stats->tx_bytes = sum.tx_bytes; + stats->tx_packets = sum.tx_packets; + stats->rx_bytes = sum.rx_bytes; + stats->rx_packets = sum.rx_packets; + + return stats; +} + static int br_change_mtu(struct net_device *dev, int new_mtu) { struct net_bridge *br = netdev_priv(dev); @@ -180,19 +206,28 @@ static const struct net_device_ops br_netdev_ops = { .ndo_open = br_dev_open, .ndo_stop = br_dev_stop, .ndo_start_xmit = br_dev_xmit, + .ndo_get_stats = br_get_stats, .ndo_set_mac_address = br_set_mac_address, .ndo_set_multicast_list = br_dev_set_multicast_list, .ndo_change_mtu = br_change_mtu, .ndo_do_ioctl = br_dev_ioctl, }; +static void br_dev_free(struct net_device *dev) +{ + struct net_bridge *br = netdev_priv(dev); + + free_percpu(br->stats); + free_netdev(dev); +} + void br_dev_setup(struct net_device *dev) { random_ether_addr(dev->dev_addr); ether_setup(dev); dev->netdev_ops = &br_netdev_ops; - dev->destructor = free_netdev; + dev->destructor = br_dev_free; SET_ETHTOOL_OPS(dev, &br_ethtool_ops); dev->tx_queue_len = 0; dev->priv_flags = IFF_EBRIDGE; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 0b6b1f2ff7ac..521439333316 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -186,6 +186,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name) br = netdev_priv(dev); br->dev = dev; + br->stats = alloc_percpu(struct br_cpu_netstats); + if (!br->stats) { + free_netdev(dev); + return NULL; + } + spin_lock_init(&br->lock); INIT_LIST_HEAD(&br->port_list); spin_lock_init(&br->hash_lock); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index a82dde2d2ead..e7f4c1d02f57 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -24,9 
+24,11 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; static int br_pass_frame_up(struct sk_buff *skb) { struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; + struct net_bridge *br = netdev_priv(brdev); + struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); - brdev->stats.rx_packets++; - brdev->stats.rx_bytes += skb->len; + brstats->rx_packets++; + brstats->rx_bytes += skb->len; indev = skb->dev; skb->dev = brdev; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index eaa0e1bae49b..2048ef0f9be5 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -24,51 +24,139 @@ #include <linux/slab.h> #include <linux/timer.h> #include <net/ip.h> +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +#include <net/ipv6.h> +#include <net/mld.h> +#include <net/addrconf.h> +#include <net/ip6_checksum.h> +#endif #include "br_private.h" -static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static inline int ipv6_is_local_multicast(const struct in6_addr *addr) { - return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1); + if (ipv6_addr_is_multicast(addr) && + IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL) + return 1; + return 0; +} +#endif + +static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) +{ + if (a->proto != b->proto) + return 0; + switch (a->proto) { + case htons(ETH_P_IP): + return a->u.ip4 == b->u.ip4; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case htons(ETH_P_IPV6): + return ipv6_addr_equal(&a->u.ip6, &b->u.ip6); +#endif + } + return 0; +} + +static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) +{ + return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, + const struct in6_addr *ip) +{ + return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); +} +#endif + +static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, + struct br_ip *ip) +{ + switch (ip->proto) { + case htons(ETH_P_IP): + return __br_ip4_hash(mdb, ip->u.ip4); +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case htons(ETH_P_IPV6): + return __br_ip6_hash(mdb, &ip->u.ip6); +#endif + } + return 0; } static struct net_bridge_mdb_entry *__br_mdb_ip_get( - struct net_bridge_mdb_htable *mdb, __be32 dst, int hash) + struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash) { struct net_bridge_mdb_entry *mp; struct hlist_node *p; hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { - if (dst == mp->addr) + if (br_ip_equal(&mp->addr, dst)) return mp; } return NULL; } -static struct net_bridge_mdb_entry *br_mdb_ip_get( +static struct net_bridge_mdb_entry *br_mdb_ip4_get( struct net_bridge_mdb_htable *mdb, __be32 dst) { - if (!mdb) - return NULL; + struct br_ip br_dst; + br_dst.u.ip4 = dst; + br_dst.proto = htons(ETH_P_IP); + + return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst)); +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static struct net_bridge_mdb_entry *br_mdb_ip6_get( + struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst) +{ + struct br_ip br_dst; + + ipv6_addr_copy(&br_dst.u.ip6, dst); + br_dst.proto = htons(ETH_P_IPV6); + + return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst)); +} +#endif + +static struct net_bridge_mdb_entry *br_mdb_ip_get( + struct 
net_bridge_mdb_htable *mdb, struct br_ip *dst) +{ return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst)); } struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, struct sk_buff *skb) { - if (br->multicast_disabled) + struct net_bridge_mdb_htable *mdb = br->mdb; + struct br_ip ip; + + if (!mdb || br->multicast_disabled) + return NULL; + + if (BR_INPUT_SKB_CB(skb)->igmp) return NULL; + ip.proto = skb->protocol; + switch (skb->protocol) { case htons(ETH_P_IP): - if (BR_INPUT_SKB_CB(skb)->igmp) - break; - return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr); + ip.u.ip4 = ip_hdr(skb)->daddr; + break; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case htons(ETH_P_IPV6): + ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr); + break; +#endif + default: + return NULL; } - return NULL; + return br_mdb_ip_get(mdb, &ip); } static void br_mdb_free(struct rcu_head *head) @@ -95,7 +183,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new, for (i = 0; i < old->max; i++) hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver]) hlist_add_head(&mp->hlist[new->ver], - &new->mhash[br_ip_hash(new, mp->addr)]); + &new->mhash[br_ip_hash(new, &mp->addr)]); if (!elasticity) return 0; @@ -163,7 +251,7 @@ static void br_multicast_del_pg(struct net_bridge *br, struct net_bridge_port_group *p; struct net_bridge_port_group **pp; - mp = br_mdb_ip_get(mdb, pg->addr); + mp = br_mdb_ip_get(mdb, &pg->addr); if (WARN_ON(!mp)) return; @@ -249,8 +337,8 @@ out: return 0; } -static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, - __be32 group) +static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, + __be32 group) { struct sk_buff *skb; struct igmphdr *ih; @@ -314,12 +402,104 @@ out: return skb; } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, + struct in6_addr *group) +{ + struct sk_buff *skb; + struct ipv6hdr *ip6h; + struct mld_msg *mldq; + struct ethhdr *eth; + u8 *hopopt; + unsigned long interval; + + skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) + + 8 + sizeof(*mldq)); + if (!skb) + goto out; + + skb->protocol = htons(ETH_P_IPV6); + + /* Ethernet header */ + skb_reset_mac_header(skb); + eth = eth_hdr(skb); + + memcpy(eth->h_source, br->dev->dev_addr, 6); + ipv6_eth_mc_map(group, eth->h_dest); + eth->h_proto = htons(ETH_P_IPV6); + skb_put(skb, sizeof(*eth)); + + /* IPv6 header + HbH option */ + skb_set_network_header(skb, skb->len); + ip6h = ipv6_hdr(skb); + + *(__force __be32 *)ip6h = htonl(0x60000000); + ip6h->payload_len = 8 + sizeof(*mldq); + ip6h->nexthdr = IPPROTO_HOPOPTS; + ip6h->hop_limit = 1; + ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); + ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); + + hopopt = (u8 *)(ip6h + 1); + hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ + hopopt[1] = 0; /* length of HbH */ + hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */ + hopopt[3] = 2; /* Length of RA Option */ + hopopt[4] = 0; /* Type = 0x0000 (MLD) */ + hopopt[5] = 0; + hopopt[6] = IPV6_TLV_PAD0; /* Pad0 */ + hopopt[7] = IPV6_TLV_PAD0; /* Pad0 */ + + skb_put(skb, sizeof(*ip6h) + 8); + + /* ICMPv6 */ + skb_set_transport_header(skb, skb->len); + mldq = (struct mld_msg *) icmp6_hdr(skb); + + interval = ipv6_addr_any(group) ? 
br->multicast_last_member_interval : + br->multicast_query_response_interval; + + mldq->mld_type = ICMPV6_MGM_QUERY; + mldq->mld_code = 0; + mldq->mld_cksum = 0; + mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); + mldq->mld_reserved = 0; + ipv6_addr_copy(&mldq->mld_mca, group); + + /* checksum */ + mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + sizeof(*mldq), IPPROTO_ICMPV6, + csum_partial(mldq, + sizeof(*mldq), 0)); + skb_put(skb, sizeof(*mldq)); + + __skb_pull(skb, sizeof(*eth)); + +out: + return skb; +} +#endif + +static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br, + struct br_ip *addr) +{ + switch (addr->proto) { + case htons(ETH_P_IP): + return br_ip4_multicast_alloc_query(br, addr->u.ip4); +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case htons(ETH_P_IPV6): + return br_ip6_multicast_alloc_query(br, &addr->u.ip6); +#endif + } + return NULL; +} + static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp) { struct net_bridge *br = mp->br; struct sk_buff *skb; - skb = br_multicast_alloc_query(br, mp->addr); + skb = br_multicast_alloc_query(br, &mp->addr); if (!skb) goto timer; @@ -353,7 +533,7 @@ static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg) struct net_bridge *br = port->br; struct sk_buff *skb; - skb = br_multicast_alloc_query(br, pg->addr); + skb = br_multicast_alloc_query(br, &pg->addr); if (!skb) goto timer; @@ -383,8 +563,8 @@ out: } static struct net_bridge_mdb_entry *br_multicast_get_group( - struct net_bridge *br, struct net_bridge_port *port, __be32 group, - int hash) + struct net_bridge *br, struct net_bridge_port *port, + struct br_ip *group, int hash) { struct net_bridge_mdb_htable *mdb = br->mdb; struct net_bridge_mdb_entry *mp; @@ -396,9 +576,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group( hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) { count++; - if (unlikely(group == mp->addr)) { + if (unlikely(br_ip_equal(group, &mp->addr))) return mp; - } } elasticity = 0; @@ -463,7 +642,8 @@ err: } static struct net_bridge_mdb_entry *br_multicast_new_group( - struct net_bridge *br, struct net_bridge_port *port, __be32 group) + struct net_bridge *br, struct net_bridge_port *port, + struct br_ip *group) { struct net_bridge_mdb_htable *mdb = br->mdb; struct net_bridge_mdb_entry *mp; @@ -496,7 +676,7 @@ rehash: goto out; mp->br = br; - mp->addr = group; + mp->addr = *group; setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp); setup_timer(&mp->query_timer, br_multicast_group_query_expired, @@ -510,7 +690,8 @@ out: } static int br_multicast_add_group(struct net_bridge *br, - struct net_bridge_port *port, __be32 group) + struct net_bridge_port *port, + struct br_ip *group) { struct net_bridge_mdb_entry *mp; struct net_bridge_port_group *p; @@ -518,9 +699,6 @@ static int br_multicast_add_group(struct net_bridge *br, unsigned long now = jiffies; int err; - if (ipv4_is_local_multicast(group)) - return 0; - spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) @@ -549,7 +727,7 @@ static int br_multicast_add_group(struct net_bridge *br, if (unlikely(!p)) goto err; - p->addr = group; + p->addr = *group; p->port = port; p->next = *pp; hlist_add_head(&p->mglist, &port->mglist); @@ -570,6 +748,38 @@ err: return err; } +static int br_ip4_multicast_add_group(struct net_bridge *br, + struct net_bridge_port *port, + __be32 group) +{ + struct br_ip br_group; + + if 
(ipv4_is_local_multicast(group)) + return 0; + + br_group.u.ip4 = group; + br_group.proto = htons(ETH_P_IP); + + return br_multicast_add_group(br, port, &br_group); +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static int br_ip6_multicast_add_group(struct net_bridge *br, + struct net_bridge_port *port, + const struct in6_addr *group) +{ + struct br_ip br_group; + + if (ipv6_is_local_multicast(group)) + return 0; + + ipv6_addr_copy(&br_group.u.ip6, group); + br_group.proto = htons(ETH_P_IP); + + return br_multicast_add_group(br, port, &br_group); +} +#endif + static void br_multicast_router_expired(unsigned long data) { struct net_bridge_port *port = (void *)data; @@ -591,19 +801,15 @@ static void br_multicast_local_router_expired(unsigned long data) { } -static void br_multicast_send_query(struct net_bridge *br, - struct net_bridge_port *port, u32 sent) +static void __br_multicast_send_query(struct net_bridge *br, + struct net_bridge_port *port, + struct br_ip *ip) { - unsigned long time; struct sk_buff *skb; - if (!netif_running(br->dev) || br->multicast_disabled || - timer_pending(&br->multicast_querier_timer)) - return; - - skb = br_multicast_alloc_query(br, 0); + skb = br_multicast_alloc_query(br, ip); if (!skb) - goto timer; + return; if (port) { __skb_push(skb, sizeof(struct ethhdr)); @@ -612,8 +818,28 @@ static void br_multicast_send_query(struct net_bridge *br, dev_queue_xmit); } else netif_rx(skb); +} + +static void br_multicast_send_query(struct net_bridge *br, + struct net_bridge_port *port, u32 sent) +{ + unsigned long time; + struct br_ip br_group; + + if (!netif_running(br->dev) || br->multicast_disabled || + timer_pending(&br->multicast_querier_timer)) + return; + + memset(&br_group.u, 0, sizeof(br_group.u)); + + br_group.proto = htons(ETH_P_IP); + __br_multicast_send_query(br, port, &br_group); + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + br_group.proto = htons(ETH_P_IPV6); + __br_multicast_send_query(br, port, &br_group); +#endif -timer: time = jiffies; time += sent < br->multicast_startup_query_count ? 
br->multicast_startup_query_interval : @@ -698,9 +924,9 @@ void br_multicast_disable_port(struct net_bridge_port *port) spin_unlock(&br->multicast_lock); } -static int br_multicast_igmp3_report(struct net_bridge *br, - struct net_bridge_port *port, - struct sk_buff *skb) +static int br_ip4_multicast_igmp3_report(struct net_bridge *br, + struct net_bridge_port *port, + struct sk_buff *skb) { struct igmpv3_report *ih; struct igmpv3_grec *grec; @@ -745,7 +971,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br, continue; } - err = br_multicast_add_group(br, port, group); + err = br_ip4_multicast_add_group(br, port, group); if (err) break; } @@ -753,6 +979,66 @@ static int br_multicast_igmp3_report(struct net_bridge *br, return err; } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static int br_ip6_multicast_mld2_report(struct net_bridge *br, + struct net_bridge_port *port, + struct sk_buff *skb) +{ + struct icmp6hdr *icmp6h; + struct mld2_grec *grec; + int i; + int len; + int num; + int err = 0; + + if (!pskb_may_pull(skb, sizeof(*icmp6h))) + return -EINVAL; + + icmp6h = icmp6_hdr(skb); + num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); + len = sizeof(*icmp6h); + + for (i = 0; i < num; i++) { + __be16 *nsrcs, _nsrcs; + + nsrcs = skb_header_pointer(skb, + len + offsetof(struct mld2_grec, + grec_mca), + sizeof(_nsrcs), &_nsrcs); + if (!nsrcs) + return -EINVAL; + + if (!pskb_may_pull(skb, + len + sizeof(*grec) + + sizeof(struct in6_addr) * (*nsrcs))) + return -EINVAL; + + grec = (struct mld2_grec *)(skb->data + len); + len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); + + /* We treat these as MLDv1 reports for now. */ + switch (grec->grec_type) { + case MLD2_MODE_IS_INCLUDE: + case MLD2_MODE_IS_EXCLUDE: + case MLD2_CHANGE_TO_INCLUDE: + case MLD2_CHANGE_TO_EXCLUDE: + case MLD2_ALLOW_NEW_SOURCES: + case MLD2_BLOCK_OLD_SOURCES: + break; + + default: + continue; + } + + err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); + if (!err) + break; + } + + return err; +} +#endif + static void br_multicast_add_router(struct net_bridge *br, struct net_bridge_port *port) { @@ -800,7 +1086,7 @@ timer: static void br_multicast_query_received(struct net_bridge *br, struct net_bridge_port *port, - __be32 saddr) + int saddr) { if (saddr) mod_timer(&br->multicast_querier_timer, @@ -811,9 +1097,9 @@ static void br_multicast_query_received(struct net_bridge *br, br_multicast_mark_router(br, port); } -static int br_multicast_query(struct net_bridge *br, - struct net_bridge_port *port, - struct sk_buff *skb) +static int br_ip4_multicast_query(struct net_bridge *br, + struct net_bridge_port *port, + struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); struct igmphdr *ih = igmp_hdr(skb); @@ -831,7 +1117,7 @@ static int br_multicast_query(struct net_bridge *br, (port && port->state == BR_STATE_DISABLED)) goto out; - br_multicast_query_received(br, port, iph->saddr); + br_multicast_query_received(br, port, !!iph->saddr); group = ih->group; @@ -859,7 +1145,7 @@ static int br_multicast_query(struct net_bridge *br, if (!group) goto out; - mp = br_mdb_ip_get(br->mdb, group); + mp = br_mdb_ip4_get(br->mdb, group); if (!mp) goto out; @@ -883,9 +1169,78 @@ out: return err; } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static int br_ip6_multicast_query(struct net_bridge *br, + struct net_bridge_port *port, + struct sk_buff *skb) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); + struct net_bridge_mdb_entry *mp; + struct 
mld2_query *mld2q; + struct net_bridge_port_group *p, **pp; + unsigned long max_delay; + unsigned long now = jiffies; + struct in6_addr *group = NULL; + int err = 0; + + spin_lock(&br->multicast_lock); + if (!netif_running(br->dev) || + (port && port->state == BR_STATE_DISABLED)) + goto out; + + br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); + + if (skb->len == sizeof(*mld)) { + if (!pskb_may_pull(skb, sizeof(*mld))) { + err = -EINVAL; + goto out; + } + mld = (struct mld_msg *) icmp6_hdr(skb); + max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay)); + if (max_delay) + group = &mld->mld_mca; + } else if (skb->len >= sizeof(*mld2q)) { + if (!pskb_may_pull(skb, sizeof(*mld2q))) { + err = -EINVAL; + goto out; + } + mld2q = (struct mld2_query *)icmp6_hdr(skb); + if (!mld2q->mld2q_nsrcs) + group = &mld2q->mld2q_mca; + max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; + } + + if (!group) + goto out; + + mp = br_mdb_ip6_get(br->mdb, group); + if (!mp) + goto out; + + max_delay *= br->multicast_last_member_count; + if (!hlist_unhashed(&mp->mglist) && + (timer_pending(&mp->timer) ? + time_after(mp->timer.expires, now + max_delay) : + try_to_del_timer_sync(&mp->timer) >= 0)) + mod_timer(&mp->timer, now + max_delay); + + for (pp = &mp->ports; (p = *pp); pp = &p->next) { + if (timer_pending(&p->timer) ? + time_after(p->timer.expires, now + max_delay) : + try_to_del_timer_sync(&p->timer) >= 0) + mod_timer(&mp->timer, now + max_delay); + } + +out: + spin_unlock(&br->multicast_lock); + return err; +} +#endif + static void br_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, - __be32 group) + struct br_ip *group) { struct net_bridge_mdb_htable *mdb; struct net_bridge_mdb_entry *mp; @@ -893,9 +1248,6 @@ static void br_multicast_leave_group(struct net_bridge *br, unsigned long now; unsigned long time; - if (ipv4_is_local_multicast(group)) - return; - spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED) || @@ -946,6 +1298,38 @@ out: spin_unlock(&br->multicast_lock); } +static void br_ip4_multicast_leave_group(struct net_bridge *br, + struct net_bridge_port *port, + __be32 group) +{ + struct br_ip br_group; + + if (ipv4_is_local_multicast(group)) + return; + + br_group.u.ip4 = group; + br_group.proto = htons(ETH_P_IP); + + br_multicast_leave_group(br, port, &br_group); +} + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static void br_ip6_multicast_leave_group(struct net_bridge *br, + struct net_bridge_port *port, + const struct in6_addr *group) +{ + struct br_ip br_group; + + if (ipv6_is_local_multicast(group)) + return; + + ipv6_addr_copy(&br_group.u.ip6, group); + br_group.proto = htons(ETH_P_IPV6); + + br_multicast_leave_group(br, port, &br_group); +} +#endif + static int br_multicast_ipv4_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) @@ -1000,8 +1384,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, if (!pskb_may_pull(skb2, sizeof(*ih))) goto out; - iph = ip_hdr(skb2); - switch (skb2->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_fold(skb2->csum)) @@ -1022,16 +1404,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, case IGMP_HOST_MEMBERSHIP_REPORT: case IGMPV2_HOST_MEMBERSHIP_REPORT: BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; - err = br_multicast_add_group(br, port, ih->group); + err = br_ip4_multicast_add_group(br, port, ih->group); break; case IGMPV3_HOST_MEMBERSHIP_REPORT: - err = br_multicast_igmp3_report(br, port, skb2); + 
err = br_ip4_multicast_igmp3_report(br, port, skb2); break; case IGMP_HOST_MEMBERSHIP_QUERY: - err = br_multicast_query(br, port, skb2); + err = br_ip4_multicast_query(br, port, skb2); break; case IGMP_HOST_LEAVE_MESSAGE: - br_multicast_leave_group(br, port, ih->group); + br_ip4_multicast_leave_group(br, port, ih->group); break; } @@ -1043,6 +1425,123 @@ err_out: return err; } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static int br_multicast_ipv6_rcv(struct net_bridge *br, + struct net_bridge_port *port, + struct sk_buff *skb) +{ + struct sk_buff *skb2 = skb; + struct ipv6hdr *ip6h; + struct icmp6hdr *icmp6h; + u8 nexthdr; + unsigned len; + unsigned offset; + int err; + + if (!pskb_may_pull(skb, sizeof(*ip6h))) + return -EINVAL; + + ip6h = ipv6_hdr(skb); + + /* + * We're interested in MLD messages only. + * - Version is 6 + * - MLD has always Router Alert hop-by-hop option + * - But we do not support jumbrograms. + */ + if (ip6h->version != 6 || + ip6h->nexthdr != IPPROTO_HOPOPTS || + ip6h->payload_len == 0) + return 0; + + len = ntohs(ip6h->payload_len); + if (skb->len < len) + return -EINVAL; + + nexthdr = ip6h->nexthdr; + offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr); + + if (offset < 0 || nexthdr != IPPROTO_ICMPV6) + return 0; + + /* Okay, we found ICMPv6 header */ + skb2 = skb_clone(skb, GFP_ATOMIC); + if (!skb2) + return -ENOMEM; + + len -= offset - skb_network_offset(skb2); + + __skb_pull(skb2, offset); + skb_reset_transport_header(skb2); + + err = -EINVAL; + if (!pskb_may_pull(skb2, sizeof(*icmp6h))) + goto out; + + icmp6h = icmp6_hdr(skb2); + + switch (icmp6h->icmp6_type) { + case ICMPV6_MGM_QUERY: + case ICMPV6_MGM_REPORT: + case ICMPV6_MGM_REDUCTION: + case ICMPV6_MLD2_REPORT: + break; + default: + err = 0; + goto out; + } + + /* Okay, we found MLD message. Check further. 
*/ + if (skb2->len > len) { + err = pskb_trim_rcsum(skb2, len); + if (err) + goto out; + } + + switch (skb2->ip_summed) { + case CHECKSUM_COMPLETE: + if (!csum_fold(skb2->csum)) + break; + /*FALLTHROUGH*/ + case CHECKSUM_NONE: + skb2->csum = 0; + if (skb_checksum_complete(skb2)) + goto out; + } + + err = 0; + + BR_INPUT_SKB_CB(skb)->igmp = 1; + + switch (icmp6h->icmp6_type) { + case ICMPV6_MGM_REPORT: + { + struct mld_msg *mld = (struct mld_msg *)icmp6h; + BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; + err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); + break; + } + case ICMPV6_MLD2_REPORT: + err = br_ip6_multicast_mld2_report(br, port, skb2); + break; + case ICMPV6_MGM_QUERY: + err = br_ip6_multicast_query(br, port, skb2); + break; + case ICMPV6_MGM_REDUCTION: + { + struct mld_msg *mld = (struct mld_msg *)icmp6h; + br_ip6_multicast_leave_group(br, port, &mld->mld_mca); + } + } + +out: + __skb_push(skb2, offset); + if (skb2 != skb) + kfree_skb(skb2); + return err; +} +#endif + int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { @@ -1055,6 +1554,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port, switch (skb->protocol) { case htons(ETH_P_IP): return br_multicast_ipv4_rcv(br, port, skb); +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + case htons(ETH_P_IPV6): + return br_multicast_ipv6_rcv(br, port, skb); +#endif } return 0; diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 763a3ec292e5..1413b72acc7f 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v case NETDEV_UNREGISTER: br_del_if(br, dev); break; + + case NETDEV_PRE_TYPE_CHANGE: + /* Forbid underlaying device to change its type. */ + return NOTIFY_BAD; } /* Events that may cause spanning tree to refresh */ diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 846d7d1e2075..018499ebe19d 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -45,6 +45,17 @@ struct mac_addr unsigned char addr[6]; }; +struct br_ip +{ + union { + __be32 ip4; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct in6_addr ip6; +#endif + } u; + __be16 proto; +}; + struct net_bridge_fdb_entry { struct hlist_node hlist; @@ -64,7 +75,7 @@ struct net_bridge_port_group { struct rcu_head rcu; struct timer_list timer; struct timer_list query_timer; - __be32 addr; + struct br_ip addr; u32 queries_sent; }; @@ -77,7 +88,7 @@ struct net_bridge_mdb_entry struct rcu_head rcu; struct timer_list timer; struct timer_list query_timer; - __be32 addr; + struct br_ip addr; u32 queries_sent; }; @@ -130,11 +141,20 @@ struct net_bridge_port #endif }; +struct br_cpu_netstats { + unsigned long rx_packets; + unsigned long rx_bytes; + unsigned long tx_packets; + unsigned long tx_bytes; +}; + struct net_bridge { spinlock_t lock; struct list_head port_list; struct net_device *dev; + + struct br_cpu_netstats __percpu *stats; spinlock_t hash_lock; struct hlist_head hash[BR_HASH_SIZE]; unsigned long feature_mask; |
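The IGMP/MLD duality in the multicast hunks funnels through a single data-structure change: group addresses become the protocol-tagged struct br_ip added to br_private.h, and hashing, equality, query building and group add/leave all dispatch on its proto field. The tag deliberately reuses htons(ETH_P_IP)/htons(ETH_P_IPV6), so br_mdb_get() can copy skb->protocol into it directly. The sketch below condenses that dispatch from the br_private.h and br_multicast.c hunks; the standalone br_ip_equal()/br_ip_hash() signatures (taking secret and max directly rather than the mdb pointer) are an illustrative simplification, not the patch's exact helpers.

#include <linux/if_ether.h>
#include <linux/jhash.h>
#include <net/ipv6.h>

/* Protocol-tagged multicast group address, as added to br_private.h. */
struct br_ip {
	union {
		__be32		ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		struct in6_addr	ip6;
#endif
	} u;
	__be16 proto;	/* htons(ETH_P_IP) or htons(ETH_P_IPV6) */
};

/* Two entries match only if both the protocol tag and the address match. */
static int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
	if (a->proto != b->proto)
		return 0;

	switch (a->proto) {
	case htons(ETH_P_IP):
		return a->u.ip4 == b->u.ip4;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case htons(ETH_P_IPV6):
		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
#endif
	}
	return 0;
}

/* Same jhash calls as the patch; max is the hash table size, a power of two. */
static int br_ip_hash(u32 secret, u32 max, const struct br_ip *ip)
{
	switch (ip->proto) {
	case htons(ETH_P_IP):
		return jhash_1word(secret, (__force u32)ip->u.ip4) & (max - 1);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case htons(ETH_P_IPV6):
		return jhash2((__force u32 *)ip->u.ip6.s6_addr32, 4, secret) &
		       (max - 1);
#endif
	}
	return 0;
}

Keeping the tag in network byte order also lets the query path build a wildcard br_ip by zeroing the union and setting only proto, which is exactly how br_multicast_send_query() emits one general query per protocol via __br_multicast_send_query().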