net: Kill support for multiple hh_cache entries per neighbour

This never, ever happens.

Neighbour entries are always tied to one address family, and therefore
one set of dst_ops, and therefore one dst_ops->protocol "hh_type"
value.

This capability was blindly imported by Alexey Kuznetsov when he wrote
the neighbour layer.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2011-07-13 00:51:10 -07:00
parent e69dd336ee
commit 5c25f686db
2 changed files with 20 additions and 26 deletions

View File

@ -252,7 +252,6 @@ struct netdev_hw_addr_list {
netdev_hw_addr_list_for_each(ha, &(dev)->mc) netdev_hw_addr_list_for_each(ha, &(dev)->mc)
struct hh_cache { struct hh_cache {
struct hh_cache *hh_next; /* Next entry */
atomic_t hh_refcnt; /* number of users */ atomic_t hh_refcnt; /* number of users */
/* /*
* We want hh_output, hh_len, hh_lock and hh_data be a in a separate * We want hh_output, hh_len, hh_lock and hh_data be a in a separate
@ -260,12 +259,8 @@ struct hh_cache {
* They are mostly read, but hh_refcnt may be changed quite frequently, * They are mostly read, but hh_refcnt may be changed quite frequently,
* incurring cache line ping pongs. * incurring cache line ping pongs.
*/ */
__be16 hh_type ____cacheline_aligned_in_smp; u16 hh_len ____cacheline_aligned_in_smp;
/* protocol identifier, f.e ETH_P_IP u16 __pad;
* NOTE: For VLANs, this will be the
* encapuslated type. --BLG
*/
u16 hh_len; /* length of header */
int (*hh_output)(struct sk_buff *skb); int (*hh_output)(struct sk_buff *skb);
seqlock_t hh_lock; seqlock_t hh_lock;

View File

@ -702,9 +702,9 @@ void neigh_destroy(struct neighbour *neigh)
if (neigh_del_timer(neigh)) if (neigh_del_timer(neigh))
printk(KERN_WARNING "Impossible event.\n"); printk(KERN_WARNING "Impossible event.\n");
while ((hh = neigh->hh) != NULL) { hh = neigh->hh;
neigh->hh = hh->hh_next; if (hh) {
hh->hh_next = NULL; neigh->hh = NULL;
write_seqlock_bh(&hh->hh_lock); write_seqlock_bh(&hh->hh_lock);
hh->hh_output = neigh_blackhole; hh->hh_output = neigh_blackhole;
@ -737,7 +737,8 @@ static void neigh_suspect(struct neighbour *neigh)
neigh->output = neigh->ops->output; neigh->output = neigh->ops->output;
for (hh = neigh->hh; hh; hh = hh->hh_next) hh = neigh->hh;
if (hh)
hh->hh_output = neigh->ops->output; hh->hh_output = neigh->ops->output;
} }
@ -754,7 +755,8 @@ static void neigh_connect(struct neighbour *neigh)
neigh->output = neigh->ops->connected_output; neigh->output = neigh->ops->connected_output;
for (hh = neigh->hh; hh; hh = hh->hh_next) hh = neigh->hh;
if (hh)
hh->hh_output = neigh->ops->hh_output; hh->hh_output = neigh->ops->hh_output;
} }
@ -1025,7 +1027,8 @@ static void neigh_update_hhs(const struct neighbour *neigh)
update = neigh->dev->header_ops->cache_update; update = neigh->dev->header_ops->cache_update;
if (update) { if (update) {
for (hh = neigh->hh; hh; hh = hh->hh_next) { hh = neigh->hh;
if (hh) {
write_seqlock_bh(&hh->hh_lock); write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha); update(hh, neigh->dev, neigh->ha);
write_sequnlock_bh(&hh->hh_lock); write_sequnlock_bh(&hh->hh_lock);
@ -1211,19 +1214,17 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
} }
EXPORT_SYMBOL(neigh_event_ns); EXPORT_SYMBOL(neigh_event_ns);
static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst, static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst)
__be16 protocol)
{ {
struct hh_cache *hh; struct hh_cache *hh;
smp_rmb(); /* paired with smp_wmb() in neigh_hh_init() */ smp_rmb(); /* paired with smp_wmb() in neigh_hh_init() */
for (hh = n->hh; hh; hh = hh->hh_next) { hh = n->hh;
if (hh->hh_type == protocol) { if (hh) {
atomic_inc(&hh->hh_refcnt); atomic_inc(&hh->hh_refcnt);
if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL)) if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
hh_cache_put(hh); hh_cache_put(hh);
return true; return true;
}
} }
return false; return false;
} }
@ -1235,7 +1236,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
struct hh_cache *hh; struct hh_cache *hh;
struct net_device *dev = dst->dev; struct net_device *dev = dst->dev;
if (likely(neigh_hh_lookup(n, dst, protocol))) if (likely(neigh_hh_lookup(n, dst)))
return; return;
/* slow path */ /* slow path */
@ -1244,7 +1245,6 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
return; return;
seqlock_init(&hh->hh_lock); seqlock_init(&hh->hh_lock);
hh->hh_type = protocol;
atomic_set(&hh->hh_refcnt, 2); atomic_set(&hh->hh_refcnt, 2);
if (dev->header_ops->cache(n, hh, protocol)) { if (dev->header_ops->cache(n, hh, protocol)) {
@ -1255,7 +1255,7 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
write_lock_bh(&n->lock); write_lock_bh(&n->lock);
/* must check if another thread already did the insert */ /* must check if another thread already did the insert */
if (neigh_hh_lookup(n, dst, protocol)) { if (neigh_hh_lookup(n, dst)) {
kfree(hh); kfree(hh);
goto end; goto end;
} }
@ -1265,7 +1265,6 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
else else
hh->hh_output = n->ops->output; hh->hh_output = n->ops->output;
hh->hh_next = n->hh;
smp_wmb(); /* paired with smp_rmb() in neigh_hh_lookup() */ smp_wmb(); /* paired with smp_rmb() in neigh_hh_lookup() */
n->hh = hh; n->hh = hh;