kernel_optimize_test/drivers/irqchip/irq-crossbar.c
Marc Zyngier 783d31863f irqchip: crossbar: Convert dra7 crossbar to stacked domains
Support for the TI crossbar used on the DRA7 family of chips
is implemented as an ugly hack on the side of the GIC.

Converting it to stacked domains makes it slightly more
palatable, as it results in a cleanup.

Unfortunately, as the DT bindings failed to acknowledge the
fact that this is actually yet another interrupt controller
(the third, actually), we have yet another breakage. Oh well.

Acked-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Link: https://lkml.kernel.org/r/1426088629-15377-3-git-send-email-marc.zyngier@arm.com
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
2015-03-15 00:55:24 +00:00

/*
 * drivers/irqchip/irq-crossbar.c
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 * Author: Sricharan R <r.sricharan@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#include "irqchip.h"
#define IRQ_FREE -1
#define IRQ_RESERVED -2
#define IRQ_SKIP -3
#define GIC_IRQ_START 32

/**
 * struct crossbar_device - crossbar device description
 * @lock: spinlock serializing access to @irq_map
 * @int_max: maximum number of supported interrupts
 * @safe_map: safe default value to initialize the crossbar
 * @max_crossbar_sources: Maximum number of crossbar sources
 * @irq_map: array of interrupts to crossbar number mapping
 * @crossbar_base: crossbar base address
 * @register_offsets: offsets for each irq number
 * @write: register write function pointer
 */
struct crossbar_device {
	raw_spinlock_t lock;
	uint int_max;
	uint safe_map;
	uint max_crossbar_sources;
	uint *irq_map;
	void __iomem *crossbar_base;
	int *register_offsets;
	void (*write)(int, int);
};

static struct crossbar_device *cb;
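
/*
 * One register accessor per supported width; crossbar_of_init() selects
 * the right one from the "ti,reg-size" property.
 */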
static void crossbar_writel(int irq_no, int cb_no)
{
	writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writew(int irq_no, int cb_no)
{
	writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writeb(int irq_no, int cb_no)
{
	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}
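
/*
 * The crossbar only routes interrupts; every irq_chip operation is simply
 * forwarded to the parent (GIC) chip in the stacked domain hierarchy.
 */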
static struct irq_chip crossbar_chip = {
	.name			= "CBAR",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_wake		= irq_chip_set_wake_parent,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
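
/*
 * Pick the highest-numbered free GIC SPI, allocate it from the parent
 * domain with a GIC-style three-cell specifier and, on success, program
 * the crossbar so that input 'hwirq' drives that SPI.
 */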
static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
			    irq_hw_number_t hwirq)
{
	struct of_phandle_args args;
	int i;
	int err;

	raw_spin_lock(&cb->lock);
	for (i = cb->int_max - 1; i >= 0; i--) {
		if (cb->irq_map[i] == IRQ_FREE) {
			cb->irq_map[i] = hwirq;
			break;
		}
	}
	raw_spin_unlock(&cb->lock);

	if (i < 0)
		return -ENODEV;

	args.np = domain->parent->of_node;
	args.args_count = 3;
	args.args[0] = 0;	/* SPI */
	args.args[1] = i;
	args.args[2] = IRQ_TYPE_LEVEL_HIGH;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
	if (err)
		cb->irq_map[i] = IRQ_FREE;
	else
		cb->write(i, hwirq);

	return err;
}
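
/*
 * 'hwirq' is the crossbar input number taken from the three-cell DT
 * specifier; each requested irq gets its own GIC SPI via allocate_gic_irq().
 */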
static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
				 unsigned int nr_irqs, void *data)
{
	struct of_phandle_args *args = data;
	irq_hw_number_t hwirq;
	int i;

	if (args->args_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (args->args[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = args->args[1];
	if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++) {
		int err = allocate_gic_irq(d, virq + i, hwirq + i);

		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &crossbar_chip, NULL);
	}

	return 0;
}

/**
 * crossbar_domain_free - unmap/free a crossbar<->irq connection
 * @domain: domain of irq to unmap
 * @virq: virq number
 * @nr_irqs: number of irqs to free
 *
 * We do not maintain a use count of total number of map/unmap
 * calls for a particular irq to find out if an irq can really be
 * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
 * after which the irq is anyway unusable. So an explicit map has to be
 * called after that.
 */
static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	int i;

	raw_spin_lock(&cb->lock);
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		/* Release the route before the irq_data (and its hwirq) is reset */
		cb->irq_map[d->hwirq] = IRQ_FREE;
		cb->write(d->hwirq, cb->safe_map);
		irq_domain_reset_irq_data(d);
	}
	raw_spin_unlock(&cb->lock);
}
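
/*
 * Standard three-cell translation: cell 0 must be 0 (no PPIs here), cell 1
 * is the crossbar input number and cell 2 carries the trigger type.
 */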
static int crossbar_domain_xlate(struct irq_domain *d,
				 struct device_node *controller,
				 const u32 *intspec, unsigned int intsize,
				 unsigned long *out_hwirq,
				 unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;	/* Shouldn't happen, really... */
	if (intsize != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (intspec[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	*out_hwirq = intspec[1];
	*out_type = intspec[2];
	return 0;
}

static const struct irq_domain_ops crossbar_domain_ops = {
	.alloc	= crossbar_domain_alloc,
	.free	= crossbar_domain_free,
	.xlate	= crossbar_domain_xlate,
};
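
/*
 * Parse the crossbar node: map the registers, size the tables from
 * "ti,max-irqs" and "ti,max-crossbar-sources", mark reserved and skipped
 * lines, precompute the per-irq register offsets and route every
 * configurable line to the safe map.
 */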
static int __init crossbar_of_init(struct device_node *node)
{
	int i, size, max = 0, reserved = 0, entry;
	const __be32 *irqsr;
	int ret = -ENOMEM;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return ret;

	cb->crossbar_base = of_iomap(node, 0);
	if (!cb->crossbar_base)
		goto err_cb;

	of_property_read_u32(node, "ti,max-crossbar-sources",
			     &cb->max_crossbar_sources);
	if (!cb->max_crossbar_sources) {
		pr_err("missing 'ti,max-crossbar-sources' property\n");
		ret = -EINVAL;
		goto err_base;
	}

	of_property_read_u32(node, "ti,max-irqs", &max);
	if (!max) {
		pr_err("missing 'ti,max-irqs' property\n");
		ret = -EINVAL;
		goto err_base;
	}

	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->irq_map)
		goto err_base;

	cb->int_max = max;

	for (i = 0; i < max; i++)
		cb->irq_map[i] = IRQ_FREE;

	/* Get and mark reserved irqs */
	irqsr = of_get_property(node, "ti,irqs-reserved", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-reserved",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid reserved entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_RESERVED;
		}
	}

	/* Skip irqs hardwired to bypass the crossbar */
	irqsr = of_get_property(node, "ti,irqs-skip", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-skip",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid skip entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_SKIP;
		}
	}

	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->register_offsets)
		goto err_irq_map;

	of_property_read_u32(node, "ti,reg-size", &size);

	switch (size) {
	case 1:
		cb->write = crossbar_writeb;
		break;
	case 2:
		cb->write = crossbar_writew;
		break;
	case 4:
		cb->write = crossbar_writel;
		break;
	default:
		pr_err("Invalid reg-size property\n");
		ret = -EINVAL;
		goto err_reg_offset;
	}

	/*
	 * Register offsets are not linear because of the
	 * reserved irqs, so find and store the offsets once.
	 */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED)
			continue;

		cb->register_offsets[i] = reserved;
		reserved += size;
	}

	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);

	/* Initialize the crossbar with safe map to start with */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED ||
		    cb->irq_map[i] == IRQ_SKIP)
			continue;

		cb->write(i, cb->safe_map);
	}

	raw_spin_lock_init(&cb->lock);

	return 0;

err_reg_offset:
	kfree(cb->register_offsets);
err_irq_map:
	kfree(cb->irq_map);
err_base:
	iounmap(cb->crossbar_base);
err_cb:
	kfree(cb);
	cb = NULL;
	return ret;
}
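
/*
 * Entry point declared via IRQCHIP_DECLARE() below: find the parent (GIC)
 * domain and register the crossbar as a hierarchical domain stacked on
 * top of it.
 */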
static int __init irqcrossbar_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int err;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	err = crossbar_of_init(node);
	if (err)
		return err;

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  cb->max_crossbar_sources,
					  node, &crossbar_domain_ops,
					  NULL);
	if (!domain) {
		pr_err("%s: failed to allocate domain\n", node->full_name);
		return -ENOMEM;
	}

	return 0;
}

IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init);