Revert "powerpc/fsl_msi: spread msi ints across different MSIRs"
This reverts commit c822e73731.
This commit conflicted with a bitmap allocator change that partially
accomplishes the same thing, but which does so more correctly. Revert
this one until it can be respun on top of the correct change.
Signed-off-by: Scott Wood <scottwood@freescale.com>
commit cb0446c1b6
parent 6db35ad237
arch/powerpc/include/asm/msi_bitmap.h
@@ -25,8 +25,6 @@ int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num);
 void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
                             unsigned int num);
 void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq);
-int msi_bitmap_alloc_hwirqs_from_offset(struct msi_bitmap *bmp, int offset,
-                                        int num);

 int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp);

arch/powerpc/sysdev/fsl_msi.c
@@ -213,8 +213,6 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
         * available interrupt.
         */
        list_for_each_entry(msi_data, &msi_head, list) {
-               int off;
-
                /*
                 * If the PCI node has an fsl,msi property, then we
                 * restrict our search to the corresponding MSI node.
@@ -226,28 +224,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
                if (phandle && (phandle != msi_data->phandle))
                        continue;

-               /*
-                * Allocate the msi message so that it fits on distinct
-                * MSIR registers. Obviously, since MSIR registers are
-                * limited they will overlap at one point.
-                *
-                * Due to the format of the newly introduced MSIIR1 in
-                * mpic 4.3, consecutive msi message values map to
-                * distinct MSIRs, thus distinct msi irq cascades, so
-                * nothing special needs to be done in this case.
-                * On older mpic versions the chose distinct SRS
-                * values by aligning the msi message value to the
-                * SRS field shift.
-                */
-               if (msi_data->feature & FSL_PIC_FTR_MPIC_4_3) {
-                       off = 0;
-               } else {
-                       off = atomic_inc_return(&msi_data->msi_alloc_cnt) %
-                             msi_data->msir_num;
-                       off <<= msi_data->srs_shift;
-               }
-               hwirq = msi_bitmap_alloc_hwirqs_from_offset(
-                                               &msi_data->bitmap, off, 1);
+               hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
                if (hwirq >= 0)
                        break;
        }
@@ -487,17 +464,12 @@ static int fsl_of_msi_probe(struct platform_device *dev)
                goto error_out;
        }

-       atomic_set(&msi->msi_alloc_cnt, -1);
-
        p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);

        if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
            of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
                msi->srs_shift = MSIIR1_SRS_SHIFT;
                msi->ibs_shift = MSIIR1_IBS_SHIFT;
-               msi->msir_num = NR_MSI_REG_MSIIR1;
-               msi->feature |= FSL_PIC_FTR_MPIC_4_3;
-
                if (p)
                        dev_warn(&dev->dev, "%s: dose not support msi-available-ranges property\n",
                                 __func__);
@@ -515,7 +487,6 @@ static int fsl_of_msi_probe(struct platform_device *dev)

                msi->srs_shift = MSIIR_SRS_SHIFT;
                msi->ibs_shift = MSIIR_IBS_SHIFT;
-               msi->msir_num = NR_MSI_REG_MSIIR;

                if (p && len % (2 * sizeof(u32)) != 0) {
                        dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
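For reference, the spreading policy removed above works like this: on MPIC 4.3 hardware the MSIIR1 format already maps consecutive message values to distinct MSIRs, so the search offset stays 0; on older MPICs the driver round-robins an atomic counter across the MSIR registers and shifts the result into the SRS field to get the offset it passes to msi_bitmap_alloc_hwirqs_from_offset(). The sketch below is an illustrative userspace model of that computation, not the kernel code; the fields mirror struct fsl_msi, but the msir_num and srs_shift numbers are assumed example values.

/*
 * Illustrative userspace model of the reverted spreading policy.
 * Not the kernel code: the fields mirror struct fsl_msi, but msir_num
 * and srs_shift below are assumed example values.
 */
#include <stdatomic.h>
#include <stdio.h>

struct msi_model {
        atomic_int msi_alloc_cnt;  /* drives the round-robin MSIR choice */
        unsigned int msir_num;     /* MSIR registers to spread across */
        unsigned int srs_shift;    /* position of the SRS field in the message */
        int mpic_4_3;              /* MPIC 4.3 spreads consecutive values itself */
};

/* Offset the driver would hand to msi_bitmap_alloc_hwirqs_from_offset(). */
static unsigned int pick_offset(struct msi_model *m)
{
        if (m->mpic_4_3)
                return 0;
        /* atomic_inc_return() equivalent: increment, then use the new value. */
        unsigned int next = (unsigned int)(atomic_fetch_add(&m->msi_alloc_cnt, 1) + 1);
        return (next % m->msir_num) << m->srs_shift;
}

int main(void)
{
        struct msi_model m = { .msir_num = 8, .srs_shift = 5, .mpic_4_3 = 0 };

        atomic_init(&m.msi_alloc_cnt, -1);
        /* Successive MSIs start their bitmap search one MSIR-sized slice apart,
         * wrapping around after msir_num allocations. */
        for (int i = 0; i < 10; i++)
                printf("msi %d -> search offset %u\n", i, pick_offset(&m));
        return 0;
}

In the real driver this offset feeds msi_bitmap_alloc_hwirqs_from_offset(), whose removal shows up in the msi_bitmap.c hunk further down.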
arch/powerpc/sysdev/fsl_msi.h
@@ -15,7 +15,6 @@

 #include <linux/of.h>
 #include <asm/msi_bitmap.h>
-#include <asm/atomic.h>

 #define NR_MSI_REG_MSIIR  8  /* MSIIR can index 8 MSI registers */
 #define NR_MSI_REG_MSIIR1 16 /* MSIIR1 can index 16 MSI registers */
@@ -28,8 +27,6 @@
 #define FSL_PIC_IP_IPIC   0x00000002
 #define FSL_PIC_IP_VMPIC  0x00000003

-#define FSL_PIC_FTR_MPIC_4_3  0x00000010
-
 struct fsl_msi_cascade_data;

 struct fsl_msi {
@@ -40,8 +37,6 @@ struct fsl_msi {
        u32 msiir_offset; /* Offset of MSIIR, relative to start of CCSR */
        u32 ibs_shift; /* Shift of interrupt bit select */
        u32 srs_shift; /* Shift of the shared interrupt register select */
-       u32 msir_num; /* Number of available MSIR regs */
-       atomic_t msi_alloc_cnt; /* Counter for MSI hwirq allocations */
        void __iomem *msi_regs;
        u32 feature;
        struct fsl_msi_cascade_data *cascade_array[NR_MSI_REG_MAX];
arch/powerpc/sysdev/msi_bitmap.c
@@ -14,28 +14,23 @@
 #include <asm/msi_bitmap.h>
 #include <asm/setup.h>

-int msi_bitmap_alloc_hwirqs_from_offset(struct msi_bitmap *bmp, int offset,
-                                        int num)
-{
-       unsigned long flags;
-       int index;
-       int order = get_count_order(num);
-
-       spin_lock_irqsave(&bmp->lock, flags);
-       index = bitmap_find_next_zero_area(bmp->bitmap, bmp->irq_count,
-                                          offset, num, (1 << order) - 1);
-       bitmap_set(bmp->bitmap, index, num);
-       spin_unlock_irqrestore(&bmp->lock, flags);
-
-       pr_debug("msi_bitmap: found %d free bits starting from offset %d at index %d\n",
-                num, offset, index);
-
-       return index;
-}
-
 int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
 {
-       return msi_bitmap_alloc_hwirqs_from_offset(bmp, 0, num);
+       unsigned long flags;
+       int offset, order = get_count_order(num);
+
+       spin_lock_irqsave(&bmp->lock, flags);
+       /*
+        * This is fast, but stricter than we need. We might want to add
+        * a fallback routine which does a linear search with no alignment.
+        */
+       offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order);
+       spin_unlock_irqrestore(&bmp->lock, flags);
+
+       pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n",
+                num, order, offset);
+
+       return offset;
 }

 void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
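The restored msi_bitmap_alloc_hwirqs() body above grabs an order-aligned free region anywhere in the bitmap, while the removed helper scanned for free bits starting at a caller-chosen offset. Below is a minimal model of that difference, under assumed simplifications: a byte-per-bit array instead of the kernel bitmap API, and no alignment mask on the offset search.

/*
 * Minimal model of the two allocation strategies, for illustration only.
 * Assumed simplifications: a byte-per-bit array instead of the kernel
 * bitmap API, and the offset search ignores the alignment mask that
 * bitmap_find_next_zero_area() supports.
 */
#include <stdio.h>
#include <string.h>

#define NBITS 64
static unsigned char bits[NBITS];

/* Like bitmap_find_free_region(): first naturally aligned free 2^order block. */
static int alloc_region(int order)
{
        int size = 1 << order;

        for (int i = 0; i + size <= NBITS; i += size) {
                int busy = 0;
                for (int j = 0; j < size; j++)
                        busy |= bits[i + j];
                if (!busy) {
                        memset(&bits[i], 1, size);
                        return i;
                }
        }
        return -1;
}

/* Like the removed offset variant: first free run of num bits at or after
 * offset, letting the caller steer the hwirq toward a chosen MSIR slice. */
static int alloc_from_offset(int offset, int num)
{
        for (int i = offset; i + num <= NBITS; i++) {
                int busy = 0;
                for (int j = 0; j < num; j++)
                        busy |= bits[i + j];
                if (!busy) {
                        memset(&bits[i], 1, num);
                        return i;
                }
        }
        return -1;
}

int main(void)
{
        printf("plain alloc     -> %d\n", alloc_region(0));          /* 0  */
        printf("alloc at offset -> %d\n", alloc_from_offset(32, 1)); /* 32 */
        printf("plain alloc     -> %d\n", alloc_region(0));          /* 1  */
        return 0;
}

The offset argument is what let fsl_msi steer successive allocations into different MSIR slices; the revert falls back to plain aligned first-fit until the rework can be redone on top of the newer bitmap allocator change mentioned in the commit message.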