forked from luck/tmp_suning_uos_patched
IPoIB: Refresh paths instead of flushing them on SM change events
The patch tries to solve the problem of the device going down and paths being flushed on an SM change event. The method is to mark the paths as candidates for refresh (by setting the new valid flag to 0), and wait for an ARP probe to trigger a new path record query. The solution requires a different and less intrusive handling of the SM change event. For that, the second argument of the flush function changes its meaning from a boolean flag to a level. In most cases, SM failover doesn't cause a LID change, so traffic won't stop. In the rare cases of LID change, the remote host (the one that hadn't changed its LID) will lose connectivity until paths are refreshed. This is no worse than the current state. In fact, preventing the device from going down saves packets that otherwise would be lost. Signed-off-by: Moni Levy <monil@voltaire.com> Signed-off-by: Moni Shoua <monis@voltaire.com> Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
parent
038919f296
commit
ee1e2c82c2
|
@ -54,6 +54,12 @@
|
|||
|
||||
/* constants */
|
||||
|
||||
enum ipoib_flush_level {
|
||||
IPOIB_FLUSH_LIGHT,
|
||||
IPOIB_FLUSH_NORMAL,
|
||||
IPOIB_FLUSH_HEAVY
|
||||
};
|
||||
|
||||
enum {
|
||||
IPOIB_ENCAP_LEN = 4,
|
||||
|
||||
|
@ -284,10 +290,11 @@ struct ipoib_dev_priv {
|
|||
|
||||
struct delayed_work pkey_poll_task;
|
||||
struct delayed_work mcast_task;
|
||||
struct work_struct flush_task;
|
||||
struct work_struct flush_light;
|
||||
struct work_struct flush_normal;
|
||||
struct work_struct flush_heavy;
|
||||
struct work_struct restart_task;
|
||||
struct delayed_work ah_reap_task;
|
||||
struct work_struct pkey_event_task;
|
||||
|
||||
struct ib_device *ca;
|
||||
u8 port;
|
||||
|
@ -369,6 +376,7 @@ struct ipoib_path {
|
|||
|
||||
struct rb_node rb_node;
|
||||
struct list_head list;
|
||||
int valid;
|
||||
};
|
||||
|
||||
struct ipoib_neigh {
|
||||
|
@ -433,11 +441,14 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
|
|||
struct ipoib_ah *address, u32 qpn);
|
||||
void ipoib_reap_ah(struct work_struct *work);
|
||||
|
||||
void ipoib_mark_paths_invalid(struct net_device *dev);
|
||||
void ipoib_flush_paths(struct net_device *dev);
|
||||
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
|
||||
|
||||
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
|
||||
void ipoib_ib_dev_flush(struct work_struct *work);
|
||||
void ipoib_ib_dev_flush_light(struct work_struct *work);
|
||||
void ipoib_ib_dev_flush_normal(struct work_struct *work);
|
||||
void ipoib_ib_dev_flush_heavy(struct work_struct *work);
|
||||
void ipoib_pkey_event(struct work_struct *work);
|
||||
void ipoib_ib_dev_cleanup(struct net_device *dev);
|
||||
|
||||
|
|
|
@ -902,7 +902,8 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
|
||||
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
|
||||
enum ipoib_flush_level level)
|
||||
{
|
||||
struct ipoib_dev_priv *cpriv;
|
||||
struct net_device *dev = priv->dev;
|
||||
|
@ -915,7 +916,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
|
|||
* the parent is down.
|
||||
*/
|
||||
list_for_each_entry(cpriv, &priv->child_intfs, list)
|
||||
__ipoib_ib_dev_flush(cpriv, pkey_event);
|
||||
__ipoib_ib_dev_flush(cpriv, level);
|
||||
|
||||
mutex_unlock(&priv->vlan_mutex);
|
||||
|
||||
|
@ -929,7 +930,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
|
|||
return;
|
||||
}
|
||||
|
||||
if (pkey_event) {
|
||||
if (level == IPOIB_FLUSH_HEAVY) {
|
||||
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
|
||||
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
|
||||
ipoib_ib_dev_down(dev, 0);
|
||||
|
@ -947,11 +948,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
|
|||
priv->pkey_index = new_index;
|
||||
}
|
||||
|
||||
ipoib_dbg(priv, "flushing\n");
|
||||
if (level == IPOIB_FLUSH_LIGHT) {
|
||||
ipoib_mark_paths_invalid(dev);
|
||||
ipoib_mcast_dev_flush(dev);
|
||||
}
|
||||
|
||||
ipoib_ib_dev_down(dev, 0);
|
||||
if (level >= IPOIB_FLUSH_NORMAL)
|
||||
ipoib_ib_dev_down(dev, 0);
|
||||
|
||||
if (pkey_event) {
|
||||
if (level == IPOIB_FLUSH_HEAVY) {
|
||||
ipoib_ib_dev_stop(dev, 0);
|
||||
ipoib_ib_dev_open(dev);
|
||||
}
|
||||
|
@ -961,27 +966,34 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
|
|||
* we get here, don't bring it back up if it's not configured up
|
||||
*/
|
||||
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
|
||||
ipoib_ib_dev_up(dev);
|
||||
if (level >= IPOIB_FLUSH_NORMAL)
|
||||
ipoib_ib_dev_up(dev);
|
||||
ipoib_mcast_restart_task(&priv->restart_task);
|
||||
}
|
||||
}
|
||||
|
||||
void ipoib_ib_dev_flush(struct work_struct *work)
|
||||
void ipoib_ib_dev_flush_light(struct work_struct *work)
|
||||
{
|
||||
struct ipoib_dev_priv *priv =
|
||||
container_of(work, struct ipoib_dev_priv, flush_task);
|
||||
container_of(work, struct ipoib_dev_priv, flush_light);
|
||||
|
||||
ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
|
||||
__ipoib_ib_dev_flush(priv, 0);
|
||||
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
|
||||
}
|
||||
|
||||
void ipoib_pkey_event(struct work_struct *work)
|
||||
void ipoib_ib_dev_flush_normal(struct work_struct *work)
|
||||
{
|
||||
struct ipoib_dev_priv *priv =
|
||||
container_of(work, struct ipoib_dev_priv, pkey_event_task);
|
||||
container_of(work, struct ipoib_dev_priv, flush_normal);
|
||||
|
||||
ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
|
||||
__ipoib_ib_dev_flush(priv, 1);
|
||||
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
|
||||
}
|
||||
|
||||
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
|
||||
{
|
||||
struct ipoib_dev_priv *priv =
|
||||
container_of(work, struct ipoib_dev_priv, flush_heavy);
|
||||
|
||||
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
|
||||
}
|
||||
|
||||
void ipoib_ib_dev_cleanup(struct net_device *dev)
|
||||
|
|
|
@ -357,6 +357,23 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
|
|||
|
||||
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
|
||||
|
||||
void ipoib_mark_paths_invalid(struct net_device *dev)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
struct ipoib_path *path, *tp;
|
||||
|
||||
spin_lock_irq(&priv->lock);
|
||||
|
||||
list_for_each_entry_safe(path, tp, &priv->path_list, list) {
|
||||
ipoib_dbg(priv, "mark path LID 0x%04x GID " IPOIB_GID_FMT " invalid\n",
|
||||
be16_to_cpu(path->pathrec.dlid),
|
||||
IPOIB_GID_ARG(path->pathrec.dgid));
|
||||
path->valid = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irq(&priv->lock);
|
||||
}
|
||||
|
||||
void ipoib_flush_paths(struct net_device *dev)
|
||||
{
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
|
@ -393,6 +410,7 @@ static void path_rec_completion(int status,
|
|||
struct net_device *dev = path->dev;
|
||||
struct ipoib_dev_priv *priv = netdev_priv(dev);
|
||||
struct ipoib_ah *ah = NULL;
|
||||
struct ipoib_ah *old_ah;
|
||||
struct ipoib_neigh *neigh, *tn;
|
||||
struct sk_buff_head skqueue;
|
||||
struct sk_buff *skb;
|
||||
|
@ -416,6 +434,7 @@ static void path_rec_completion(int status,
|
|||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
old_ah = path->ah;
|
||||
path->ah = ah;
|
||||
|
||||
if (ah) {
|
||||
|
@ -428,6 +447,17 @@ static void path_rec_completion(int status,
|
|||
__skb_queue_tail(&skqueue, skb);
|
||||
|
||||
list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
|
||||
if (neigh->ah) {
|
||||
WARN_ON(neigh->ah != old_ah);
|
||||
/*
|
||||
* Dropping the ah reference inside
|
||||
* priv->lock is safe here, because we
|
||||
* will hold one more reference from
|
||||
* the original value of path->ah (ie
|
||||
* old_ah).
|
||||
*/
|
||||
ipoib_put_ah(neigh->ah);
|
||||
}
|
||||
kref_get(&path->ah->ref);
|
||||
neigh->ah = path->ah;
|
||||
memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
|
||||
|
@ -450,6 +480,7 @@ static void path_rec_completion(int status,
|
|||
while ((skb = __skb_dequeue(&neigh->queue)))
|
||||
__skb_queue_tail(&skqueue, skb);
|
||||
}
|
||||
path->valid = 1;
|
||||
}
|
||||
|
||||
path->query = NULL;
|
||||
|
@ -457,6 +488,9 @@ static void path_rec_completion(int status,
|
|||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
if (old_ah)
|
||||
ipoib_put_ah(old_ah);
|
||||
|
||||
while ((skb = __skb_dequeue(&skqueue))) {
|
||||
skb->dev = dev;
|
||||
if (dev_queue_xmit(skb))
|
||||
|
@ -630,8 +664,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
|
|||
spin_lock(&priv->lock);
|
||||
|
||||
path = __path_find(dev, phdr->hwaddr + 4);
|
||||
if (!path) {
|
||||
path = path_rec_create(dev, phdr->hwaddr + 4);
|
||||
if (!path || !path->valid) {
|
||||
if (!path)
|
||||
path = path_rec_create(dev, phdr->hwaddr + 4);
|
||||
if (path) {
|
||||
/* put pseudoheader back on for next time */
|
||||
skb_push(skb, sizeof *phdr);
|
||||
|
@ -1046,9 +1081,10 @@ static void ipoib_setup(struct net_device *dev)
|
|||
INIT_LIST_HEAD(&priv->multicast_list);
|
||||
|
||||
INIT_DELAYED_WORK(&priv->pkey_poll_task, ipoib_pkey_poll);
|
||||
INIT_WORK(&priv->pkey_event_task, ipoib_pkey_event);
|
||||
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
|
||||
INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush);
|
||||
INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
|
||||
INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
|
||||
INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
|
||||
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
|
||||
INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
|
||||
}
|
||||
|
|
|
@ -290,15 +290,17 @@ void ipoib_event(struct ib_event_handler *handler,
|
|||
if (record->element.port_num != priv->port)
|
||||
return;
|
||||
|
||||
if (record->event == IB_EVENT_PORT_ERR ||
|
||||
record->event == IB_EVENT_PORT_ACTIVE ||
|
||||
record->event == IB_EVENT_LID_CHANGE ||
|
||||
record->event == IB_EVENT_SM_CHANGE ||
|
||||
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
|
||||
record->device->name, record->element.port_num);
|
||||
|
||||
if (record->event == IB_EVENT_SM_CHANGE ||
|
||||
record->event == IB_EVENT_CLIENT_REREGISTER) {
|
||||
ipoib_dbg(priv, "Port state change event\n");
|
||||
queue_work(ipoib_workqueue, &priv->flush_task);
|
||||
queue_work(ipoib_workqueue, &priv->flush_light);
|
||||
} else if (record->event == IB_EVENT_PORT_ERR ||
|
||||
record->event == IB_EVENT_PORT_ACTIVE ||
|
||||
record->event == IB_EVENT_LID_CHANGE) {
|
||||
queue_work(ipoib_workqueue, &priv->flush_normal);
|
||||
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
|
||||
ipoib_dbg(priv, "P_Key change event on port:%d\n", priv->port);
|
||||
queue_work(ipoib_workqueue, &priv->pkey_event_task);
|
||||
queue_work(ipoib_workqueue, &priv->flush_heavy);
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user