kernel_optimize_test/drivers/clk/mvebu/clk-corediv.c
Latest commit: 62e59c4e69 ("clk: Remove io.h from clk-provider.h") by Stephen Boyd
Now that we've gotten rid of clk_readl() we can remove io.h from the
clk-provider header and push the io.h include out to any code that
uses readl()/writel() and friends but wasn't already including the
io.h header itself.

Found with this grep:

  git grep -l clk-provider.h | grep '.c$' | xargs git grep -L 'linux/io.h' | \
  	xargs git grep -l \
	-e '\<__iowrite32_copy\>' --or \
	-e '\<__ioread32_copy\>' --or \
	-e '\<__iowrite64_copy\>' --or \
	-e '\<ioremap_page_range\>' --or \
	-e '\<ioremap_huge_init\>' --or \
	-e '\<arch_ioremap_pud_supported\>' --or \
	-e '\<arch_ioremap_pmd_supported\>' --or \
	-e '\<devm_ioport_map\>' --or \
	-e '\<devm_ioport_unmap\>' --or \
	-e '\<IOMEM_ERR_PTR\>' --or \
	-e '\<devm_ioremap\>' --or \
	-e '\<devm_ioremap_nocache\>' --or \
	-e '\<devm_ioremap_wc\>' --or \
	-e '\<devm_iounmap\>' --or \
	-e '\<devm_ioremap_release\>' --or \
	-e '\<devm_memremap\>' --or \
	-e '\<devm_memunmap\>' --or \
	-e '\<__devm_memremap_pages\>' --or \
	-e '\<pci_remap_cfgspace\>' --or \
	-e '\<arch_has_dev_port\>' --or \
	-e '\<arch_phys_wc_add\>' --or \
	-e '\<arch_phys_wc_del\>' --or \
	-e '\<memremap\>' --or \
	-e '\<memunmap\>' --or \
	-e '\<arch_io_reserve_memtype_wc\>' --or \
	-e '\<arch_io_free_memtype_wc\>' --or \
	-e '\<__io_aw\>' --or \
	-e '\<__io_pbw\>' --or \
	-e '\<__io_paw\>' --or \
	-e '\<__io_pbr\>' --or \
	-e '\<__io_par\>' --or \
	-e '\<__raw_readb\>' --or \
	-e '\<__raw_readw\>' --or \
	-e '\<__raw_readl\>' --or \
	-e '\<__raw_readq\>' --or \
	-e '\<__raw_writeb\>' --or \
	-e '\<__raw_writew\>' --or \
	-e '\<__raw_writel\>' --or \
	-e '\<__raw_writeq\>' --or \
	-e '\<readb\>' --or \
	-e '\<readw\>' --or \
	-e '\<readl\>' --or \
	-e '\<readq\>' --or \
	-e '\<writeb\>' --or \
	-e '\<writew\>' --or \
	-e '\<writel\>' --or \
	-e '\<writeq\>' --or \
	-e '\<readb_relaxed\>' --or \
	-e '\<readw_relaxed\>' --or \
	-e '\<readl_relaxed\>' --or \
	-e '\<readq_relaxed\>' --or \
	-e '\<writeb_relaxed\>' --or \
	-e '\<writew_relaxed\>' --or \
	-e '\<writel_relaxed\>' --or \
	-e '\<writeq_relaxed\>' --or \
	-e '\<readsb\>' --or \
	-e '\<readsw\>' --or \
	-e '\<readsl\>' --or \
	-e '\<readsq\>' --or \
	-e '\<writesb\>' --or \
	-e '\<writesw\>' --or \
	-e '\<writesl\>' --or \
	-e '\<writesq\>' --or \
	-e '\<inb\>' --or \
	-e '\<inw\>' --or \
	-e '\<inl\>' --or \
	-e '\<outb\>' --or \
	-e '\<outw\>' --or \
	-e '\<outl\>' --or \
	-e '\<inb_p\>' --or \
	-e '\<inw_p\>' --or \
	-e '\<inl_p\>' --or \
	-e '\<outb_p\>' --or \
	-e '\<outw_p\>' --or \
	-e '\<outl_p\>' --or \
	-e '\<insb\>' --or \
	-e '\<insw\>' --or \
	-e '\<insl\>' --or \
	-e '\<outsb\>' --or \
	-e '\<outsw\>' --or \
	-e '\<outsl\>' --or \
	-e '\<insb_p\>' --or \
	-e '\<insw_p\>' --or \
	-e '\<insl_p\>' --or \
	-e '\<outsb_p\>' --or \
	-e '\<outsw_p\>' --or \
	-e '\<outsl_p\>' --or \
	-e '\<ioread8\>' --or \
	-e '\<ioread16\>' --or \
	-e '\<ioread32\>' --or \
	-e '\<ioread64\>' --or \
	-e '\<iowrite8\>' --or \
	-e '\<iowrite16\>' --or \
	-e '\<iowrite32\>' --or \
	-e '\<iowrite64\>' --or \
	-e '\<ioread16be\>' --or \
	-e '\<ioread32be\>' --or \
	-e '\<ioread64be\>' --or \
	-e '\<iowrite16be\>' --or \
	-e '\<iowrite32be\>' --or \
	-e '\<iowrite64be\>' --or \
	-e '\<ioread8_rep\>' --or \
	-e '\<ioread16_rep\>' --or \
	-e '\<ioread32_rep\>' --or \
	-e '\<ioread64_rep\>' --or \
	-e '\<iowrite8_rep\>' --or \
	-e '\<iowrite16_rep\>' --or \
	-e '\<iowrite32_rep\>' --or \
	-e '\<iowrite64_rep\>' --or \
	-e '\<__io_virt\>' --or \
	-e '\<pci_iounmap\>' --or \
	-e '\<virt_to_phys\>' --or \
	-e '\<phys_to_virt\>' --or \
	-e '\<ioremap_uc\>' --or \
	-e '\<ioremap\>' --or \
	-e '\<__ioremap\>' --or \
	-e '\<iounmap\>' --or \
	-e '\<ioremap\>' --or \
	-e '\<ioremap_nocache\>' --or \
	-e '\<ioremap_uc\>' --or \
	-e '\<ioremap_wc\>' --or \
	-e '\<ioremap_wc\>' --or \
	-e '\<ioremap_wt\>' --or \
	-e '\<ioport_map\>' --or \
	-e '\<ioport_unmap\>' --or \
	-e '\<ioport_map\>' --or \
	-e '\<ioport_unmap\>' --or \
	-e '\<xlate_dev_kmem_ptr\>' --or \
	-e '\<xlate_dev_mem_ptr\>' --or \
	-e '\<unxlate_dev_mem_ptr\>' --or \
	-e '\<virt_to_bus\>' --or \
	-e '\<bus_to_virt\>' --or \
	-e '\<memset_io\>' --or \
	-e '\<memcpy_fromio\>' --or \
	-e '\<memcpy_toio\>'

I also reordered a couple of includes that weren't alphabetical, and
removed clk.h from the kona driver, replacing it with clk-provider.h
because that driver doesn't use the clk consumer API.
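
For a driver like this one, which calls readl()/writel() but was
getting io.h only indirectly through clk-provider.h, the resulting
change is just an explicit include (illustrative sketch, not the
verbatim hunk from this commit):

	 #include <linux/kernel.h>
	 #include <linux/clk-provider.h>
	+#include <linux/io.h>
	 #include <linux/of_address.h>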

Acked-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Chen-Yu Tsai <wens@csie.org>
Acked-by: Maxime Ripard <maxime.ripard@bootlin.com>
Acked-by: Tero Kristo <t-kristo@ti.com>
Acked-by: Sekhar Nori <nsekhar@ti.com>
Cc: Krzysztof Kozlowski <krzk@kernel.org>
Acked-by: Mark Brown <broonie@kernel.org>
Cc: Chris Zankel <chris@zankel.net>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Acked-by: John Crispin <john@phrozen.org>
Acked-by: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
Date: 2019-05-15 13:21:37 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * MVEBU Core divider clock
 *
 * Copyright (C) 2013 Marvell
 *
 * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
 *
 */

#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "common.h"

#define CORE_CLK_DIV_RATIO_MASK	0xff

/*
 * This structure describes the hardware details (bit offset and mask)
 * to configure one particular core divider clock. Those hardware
 * details may differ from one SoC to another. This structure is
 * therefore typically instantiated statically to describe the
 * hardware details.
 */
struct clk_corediv_desc {
	unsigned int mask;
	unsigned int offset;
	unsigned int fieldbit;
};

/*
 * This structure describes the hardware details to configure the core
 * divider clocks on a given SoC. Amongst others, it points to the
 * array of core divider clock descriptors for this SoC, as well as
 * the corresponding operations to manipulate them.
 */
struct clk_corediv_soc_desc {
	const struct clk_corediv_desc *descs;
	unsigned int ndescs;
	const struct clk_ops ops;
	u32 ratio_reload;
	u32 enable_bit_offset;
	u32 ratio_offset;
};

/*
 * This structure represents one core divider clock for the clock
 * framework, and is dynamically allocated for each core divider clock
 * existing in the current SoC.
 */
struct clk_corediv {
	struct clk_hw hw;
	void __iomem *reg;
	const struct clk_corediv_desc *desc;
	const struct clk_corediv_soc_desc *soc_desc;
	spinlock_t lock;
};

static struct clk_onecell_data clk_data;

/*
 * Description of the core divider clocks available. For now, we
 * support only NAND, and it is available at the same register
 * locations regardless of the SoC.
 */
static const struct clk_corediv_desc mvebu_corediv_desc[] = {
	{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
};

static const struct clk_corediv_desc mv98dx3236_corediv_desc[] = {
	{ .mask = 0x0f, .offset = 6, .fieldbit = 27 }, /* NAND clock */
};

#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)

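/*
 * Worked example of the register layout, using the Armada 370
 * parameters defined further down (an illustration derived from this
 * file, not additional hardware documentation): with .fieldbit = 1
 * and .enable_bit_offset = 24, the NAND clock gate is bit 25 of the
 * base register; with .mask = 0x3f, .offset = 8 and .ratio_offset =
 * 0x8, the divider value sits in bits [13:8] of the register at
 * base + 0x8; and .ratio_reload = BIT(8) in the base register is the
 * trigger that makes the hardware latch a newly written ratio.
 */
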
static int clk_corediv_is_enabled(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset;

	return !!(readl(corediv->reg) & enable_mask);
}

static int clk_corediv_enable(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg;

	spin_lock_irqsave(&corediv->lock, flags);

	reg = readl(corediv->reg);
	reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
	writel(reg, corediv->reg);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}

static void clk_corediv_disable(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg;

	spin_lock_irqsave(&corediv->lock, flags);

	reg = readl(corediv->reg);
	reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
	writel(reg, corediv->reg);

	spin_unlock_irqrestore(&corediv->lock, flags);
}

static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
					     unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	u32 reg, div;

	reg = readl(corediv->reg + soc_desc->ratio_offset);
	div = (reg >> desc->offset) & desc->mask;

	return parent_rate / div;
}

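/*
 * Worked example for the rate computation above (rates are
 * hypothetical): with a 2 GHz parent clock and a divider field that
 * reads back 4, the NAND clock is reported as 500 MHz.
 */
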
static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
				   unsigned long *parent_rate)
{
	/* Valid ratios are 1:4, 1:5, 1:6 and 1:8 */
	u32 div;

	div = *parent_rate / rate;
	if (div < 4)
		div = 4;
	else if (div > 6)
		div = 8;

	return *parent_rate / div;
}

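/*
 * Worked example for the rounding above (rates are hypothetical):
 * with a 2 GHz parent, a 600 MHz request gives div = 3 and is clamped
 * up to 4 (500 MHz); a 300 MHz request gives div = 6 (~333 MHz); a
 * 200 MHz request gives div = 10 and is clamped down to the maximum
 * supported divider of 8 (250 MHz). A computed div of 7 is not a
 * valid ratio and is rounded up to 8 as well.
 */
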
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg, div;

	div = parent_rate / rate;

	spin_lock_irqsave(&corediv->lock, flags);

	/* Write new divider to the divider ratio register */
	reg = readl(corediv->reg + soc_desc->ratio_offset);
	reg &= ~(desc->mask << desc->offset);
	reg |= (div & desc->mask) << desc->offset;
	writel(reg, corediv->reg + soc_desc->ratio_offset);

	/* Set reload-force for this clock */
	reg = readl(corediv->reg) | BIT(desc->fieldbit);
	writel(reg, corediv->reg);

	/* Now trigger the clock update */
	reg = readl(corediv->reg) | soc_desc->ratio_reload;
	writel(reg, corediv->reg);

	/*
	 * Wait for clocks to settle down, and then clear all the
	 * ratio requests and the reload request.
	 */
	udelay(1000);
	reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload);
	writel(reg, corediv->reg);
	udelay(1000);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}

static const struct clk_corediv_soc_desc armada370_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 24,
	.ratio_offset = 0x8,
};

static const struct clk_corediv_soc_desc armada380_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 16,
	.ratio_offset = 0x4,
};

static const struct clk_corediv_soc_desc armada375_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.ratio_offset = 0x4,
};

static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = {
	.descs = mv98dx3236_corediv_desc,
	.ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(10),
	.ratio_offset = 0x8,
};

static void __init
mvebu_corediv_clk_init(struct device_node *node,
		       const struct clk_corediv_soc_desc *soc_desc)
{
	struct clk_init_data init;
	struct clk_corediv *corediv;
	struct clk **clks;
	void __iomem *base;
	const char *parent_name;
	const char *clk_name;
	int i;

	base = of_iomap(node, 0);
	if (WARN_ON(!base))
		return;

	parent_name = of_clk_get_parent_name(node, 0);

	clk_data.clk_num = soc_desc->ndescs;

	/* clks holds the clock array */
	clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
		       GFP_KERNEL);
	if (WARN_ON(!clks))
		goto err_unmap;
	/* corediv holds the clock specific array */
	corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
			  GFP_KERNEL);
	if (WARN_ON(!corediv))
		goto err_free_clks;

	spin_lock_init(&corediv->lock);

	for (i = 0; i < clk_data.clk_num; i++) {
		of_property_read_string_index(node, "clock-output-names",
					      i, &clk_name);
		init.num_parents = 1;
		init.parent_names = &parent_name;
		init.name = clk_name;
		init.ops = &soc_desc->ops;
		init.flags = 0;

		corediv[i].soc_desc = soc_desc;
		corediv[i].desc = soc_desc->descs + i;
		corediv[i].reg = base;
		corediv[i].hw.init = &init;

		clks[i] = clk_register(NULL, &corediv[i].hw);
		WARN_ON(IS_ERR(clks[i]));
	}

	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
	return;

err_free_clks:
	kfree(clks);
err_unmap:
	iounmap(base);
}

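/*
 * The CLK_OF_DECLARE() entries below bind the per-SoC descriptors to
 * their device tree compatibles. The clock node is expected to look
 * roughly like the following (illustrative only: the unit address,
 * register size and parent phandle are SoC/board specific; see the
 * mvebu-corediv-clock DT binding for the authoritative format):
 *
 *	corediv_clk: corediv-clocks@18740 {
 *		compatible = "marvell,armada-370-corediv-clock";
 *		reg = <0x18740 0xc>;
 *		#clock-cells = <1>;
 *		clocks = <&pll>;
 *		clock-output-names = "nand";
 *	};
 */
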
static void __init armada370_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
}
CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock",
	       armada370_corediv_clk_init);

static void __init armada375_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
}
CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock",
	       armada375_corediv_clk_init);

static void __init armada380_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
}
CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock",
	       armada380_corediv_clk_init);

static void __init mv98dx3236_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &mv98dx3236_corediv_soc);
}
CLK_OF_DECLARE(mv98dx3236_corediv_clk, "marvell,mv98dx3236-corediv-clock",
	       mv98dx3236_corediv_clk_init);