forked from luck/tmp_suning_uos_patched
mlx4_core: Allocate and map sufficient ICM memory for EQ context
The current implementation allocates a single host page for EQ context memory, which was OK when we only allocated a few EQs. However, since we now allocate an EQ for each CPU core, this patch removes the hard-coded limit (which we exceed with 4 KB pages and 128 byte EQ context entries with 32 CPUs) and uses the same ICM table code as all other context tables, which ends up simplifying the code quite a bit while fixing the problem. This problem was actually hit in practice on a dual-socket Nehalem box with 16 real hardware threads and sufficiently odd ACPI tables that it shows on boot SMP: Allowing 32 CPUs, 16 hotplug CPUs so num_possible_cpus() ends up 32, and mlx4 ends up creating 33 MSI-X interrupts and 33 EQs. This mlx4 bug means that mlx4 can't even initialize at all on this quite mainstream system. Cc: <stable@kernel.org> Reported-by: Eli Cohen <eli@mellanox.co.il> Tested-by: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: Roland Dreier <rolandd@cisco.com>
This commit is contained in:
  parent: 338a8fad27
  commit: fa0681d212
@@ -525,48 +525,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-
-	/*
-	 * We assume that mapping one page is enough for the whole EQ
-	 * context table.  This is fine with all current HCAs, because
-	 * we only use 32 EQs and each EQ uses 64 bytes of context
-	 * memory, or 1 KB total.
-	 */
-	priv->eq_table.icm_virt = icm_virt;
-	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-	if (!priv->eq_table.icm_page)
-		return -ENOMEM;
-	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-		__free_page(priv->eq_table.icm_page);
-		return -ENOMEM;
-	}
-
-	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-	if (ret) {
-		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->eq_table.icm_page);
-	}
-
-	return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-
-	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-		       PCI_DMA_BIDIRECTIONAL);
-	__free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 		goto err_unmap_aux;
 	}
 
-	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
+				  dev->caps.num_eqs, dev->caps.num_eqs,
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
 		goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
 
 err_unmap_eq:
-	mlx4_unmap_eq_icm(dev);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 
 err_unmap_cmpt:
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-	mlx4_unmap_eq_icm(dev);
 
 	mlx4_UNMAP_ICM_AUX(dev);
 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
 	void __iomem	      **uar_map;
 	u32			clr_mask;
 	struct mlx4_eq	       *eq;
-	u64			icm_virt;
-	struct page	       *icm_page;
-	dma_addr_t		icm_dma;
+	struct mlx4_icm_table	table;
 	struct mlx4_icm_table	cmpt_table;
 	int			have_irq;
 	u8			inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 		      struct mlx4_dev_cap *dev_cap,
 		      struct mlx4_init_hca_param *init_hca);
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
Loading…
Reference in New Issue
Block a user