Merge branches 'cma' and 'mlx4' into for-next
commit 78c90247af
@@ -345,17 +345,17 @@ static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num

 	err = ib_query_port(device, port_num, &props);
 	if (err)
-		return 1;
+		return err;

 	for (i = 0; i < props.gid_tbl_len; ++i) {
 		err = ib_query_gid(device, port_num, i, &tmp);
 		if (err)
-			return 1;
+			return err;
 		if (!memcmp(&tmp, gid, sizeof tmp))
 			return 0;
 	}

-	return -EAGAIN;
+	return -EADDRNOTAVAIL;
 }

 static int cma_acquire_dev(struct rdma_id_private *id_priv)
@@ -388,8 +388,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 				if (!ret) {
 					id_priv->id.port_num = port;
 					goto out;
-				} else if (ret == 1)
-					break;
+				}
 			}
 		}
 	}
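The two CMA hunks above change find_gid_port() to propagate the underlying error code (and to return -EADDRNOTAVAIL, rather than -EAGAIN, when no matching GID is in the table), so cma_acquire_dev() no longer needs to special-case a return value of 1. A minimal standalone sketch of that pattern, using hypothetical helpers lookup_gid()/acquire_port() rather than the kernel code:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for find_gid_port(): return the underlying
 * error as-is instead of a magic "1", and a specific code when the
 * GID simply is not present. */
static int lookup_gid(int query_err, int found)
{
	if (query_err)
		return query_err;		/* propagate, e.g. -EIO */
	return found ? 0 : -EADDRNOTAVAIL;	/* "not found" is not -EAGAIN */
}

/* Hypothetical caller in the cma_acquire_dev() role: success is 0,
 * any failure just bubbles up -- no "ret == 1" special case. */
static int acquire_port(void)
{
	int ret = lookup_gid(0, 1);

	if (!ret)
		return 0;	/* found a matching GID: claim the port */
	return ret;
}

int main(void)
{
	printf("acquire_port() -> %d\n", acquire_port());
	return 0;
}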
@@ -268,15 +268,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
 	unsigned long flags;

-	spin_lock_irqsave(&sriov->going_down_lock, flags);
 	spin_lock(&sriov->id_map_lock);
+	spin_lock_irqsave(&sriov->going_down_lock, flags);
 	/*make sure that there is no schedule inside the scheduled work.*/
 	if (!sriov->is_going_down) {
 		id->scheduled_delete = 1;
 		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
 	}
-	spin_unlock(&sriov->id_map_lock);
 	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
+	spin_unlock(&sriov->id_map_lock);
 }

 int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
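The schedule_delayed() hunk only reorders the two locks: id_map_lock is now taken before going_down_lock and released after it, presumably so this path nests the locks in the same order as their other users and avoids an ABBA lockdep report. A generic sketch of that rule, with made-up locks outer_lock/inner_lock (not the driver's), is below:

/* Sketch only, hypothetical locks -- not the mlx4_ib code. */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* plays the role of id_map_lock */
static DEFINE_SPINLOCK(inner_lock);	/* plays the role of going_down_lock */

/* Every path nests the locks the same way: outer_lock, then inner_lock. */
static void first_path(void)
{
	unsigned long flags;

	spin_lock(&outer_lock);
	spin_lock_irqsave(&inner_lock, flags);
	/* ... work under both locks ... */
	spin_unlock_irqrestore(&inner_lock, flags);
	spin_unlock(&outer_lock);
}

/*
 * If a second path took inner_lock first and outer_lock second, the
 * A->B order above plus B->A here would be the classic ABBA pattern:
 * lockdep warns about it, and with the wrong timing two CPUs deadlock.
 * Keeping one global order avoids that.
 */
static void second_path(void)
{
	unsigned long flags;

	spin_lock(&outer_lock);		/* same order as first_path() */
	spin_lock_irqsave(&inner_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&inner_lock, flags);
	spin_unlock(&outer_lock);
}

static int __init lock_order_demo_init(void)
{
	first_path();
	second_path();
	return 0;
}

static void __exit lock_order_demo_exit(void)
{
}

module_init(lock_order_demo_init);
module_exit(lock_order_demo_exit);
MODULE_LICENSE("GPL");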
@@ -1498,6 +1498,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 	u32 reply;
 	u8 is_going_down = 0;
 	int i;
+	unsigned long flags;

 	slave_state[slave].comm_toggle ^= 1;
 	reply = (u32) slave_state[slave].comm_toggle << 31;
@@ -1576,12 +1577,12 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
 		goto reset_slave;
 	}
-	spin_lock(&priv->mfunc.master.slave_state_lock);
+	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
 	if (!slave_state[slave].is_slave_going_down)
 		slave_state[slave].last_cmd = cmd;
 	else
 		is_going_down = 1;
-	spin_unlock(&priv->mfunc.master.slave_state_lock);
+	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
 	if (is_going_down) {
 		mlx4_warn(dev, "Slave is going down aborting command(%d)"
 			  " executing from slave:%d\n",
@@ -1597,10 +1598,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
 reset_slave:
 	/* cleanup any slave resources */
 	mlx4_delete_all_resources_for_slave(dev, slave);
-	spin_lock(&priv->mfunc.master.slave_state_lock);
+	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
 	if (!slave_state[slave].is_slave_going_down)
 		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
-	spin_unlock(&priv->mfunc.master.slave_state_lock);
+	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
 	/*with slave in the middle of flr, no need to clean resources again.*/
 inform_slave_state:
 	memset(&slave_state[slave].event_eq, 0,
@@ -407,6 +407,7 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
 	int i;
 	int err;
+	unsigned long flags;

 	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

@@ -418,10 +419,10 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)

 			mlx4_delete_all_resources_for_slave(dev, i);
 			/*return the slave to running mode*/
-			spin_lock(&priv->mfunc.master.slave_state_lock);
+			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
 			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
 			slave_state[i].is_slave_going_down = 0;
-			spin_unlock(&priv->mfunc.master.slave_state_lock);
+			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
 			/*notify the FW:*/
 			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
 				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
@@ -446,6 +447,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	u8 update_slave_state;
 	int i;
 	enum slave_port_gen_event gen_event;
+	unsigned long flags;

 	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
 		/*
@@ -653,13 +655,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			} else
 				update_slave_state = 1;

-			spin_lock(&priv->mfunc.master.slave_state_lock);
+			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
 			if (update_slave_state) {
 				priv->mfunc.master.slave_state[flr_slave].active = false;
 				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
 				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
 			}
-			spin_unlock(&priv->mfunc.master.slave_state_lock);
+			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
 			queue_work(priv->mfunc.master.comm_wq,
 				   &priv->mfunc.master.slave_flr_event_work);
 			break;
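The remaining hunks all make one change: slave_state_lock is switched from plain spin_lock() to spin_lock_irqsave() in mlx4_master_do_cmd(), mlx4_master_handle_slave_flr(), and mlx4_eq_int(), with new unsigned long flags locals holding the saved interrupt state. Since mlx4_eq_int() runs from the event-queue interrupt path, the lock can be contended from IRQ context; a process-context holder that leaves interrupts enabled risks being interrupted on the same CPU by a path that spins on the same lock. A minimal kernel-style sketch of the pattern, with a hypothetical demo_state_lock and ISR (not the mlx4 code):

/* Sketch only, hypothetical lock and ISR -- not the mlx4 code. */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_state_lock);
static int demo_state;

/*
 * Interrupt-side user (imagine this wired to a device IRQ). Interrupts
 * are already off here, so plain spin_lock() is sufficient.
 */
static irqreturn_t demo_isr(int irq, void *dev_id)
{
	spin_lock(&demo_state_lock);
	demo_state++;
	spin_unlock(&demo_state_lock);
	return IRQ_HANDLED;
}

/*
 * Process-side user: must use spin_lock_irqsave(). With a plain
 * spin_lock() here, the ISR firing on the same CPU while the lock is
 * held would spin on it forever -- the deadlock these hunks avoid.
 */
static int __init irqsave_demo_init(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_state_lock, flags);
	demo_state = 1;
	spin_unlock_irqrestore(&demo_state_lock, flags);

	(void)demo_isr;	/* not registered with request_irq() in this sketch */
	return 0;
}

static void __exit irqsave_demo_exit(void)
{
}

module_init(irqsave_demo_init);
module_exit(irqsave_demo_exit);
MODULE_LICENSE("GPL");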