ipq806x: switch to generic cpufreq driver cpufreq-dt

This fixes ondemand frequency scaling and moves ipq806x onto the upstream cpufreq-dt driver.

Also switch the default governor to ondemand, as it is now fixed.

Signed-off-by: Pavel Kubelun <be.dissent@gmail.com>
Pavel Kubelun, 2016-11-02 22:37:28 +03:00 (committed by John Crispin)
parent a154a3d8be
commit 681f28990d
5 changed files with 383 additions and 466 deletions


@@ -77,7 +77,7 @@ CONFIG_COMMON_CLK=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_COMPACTION=y
CONFIG_COREDUMP=y
-# CONFIG_CPUFREQ_DT is not set
+CONFIG_CPUFREQ_DT=y
CONFIG_CPU_32v6K=y
CONFIG_CPU_32v7=y
CONFIG_CPU_ABRT_EV7=y
@@ -89,7 +89,8 @@ CONFIG_CPU_COPY_V6=y
CONFIG_CPU_CP15=y
CONFIG_CPU_CP15_MMU=y
CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_COMMON=y
# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -105,7 +106,7 @@ CONFIG_CPU_IDLE_GOV_MENU=y
CONFIG_CPU_PABRT_V7=y
CONFIG_CPU_PM=y
CONFIG_CPU_RMAP=y
-# CONFIG_CPU_THERMAL is not set
+CONFIG_CPU_THERMAL=y
CONFIG_CPU_TLB_V7=y
CONFIG_CPU_V7=y
CONFIG_CRC16=y
@@ -146,7 +147,6 @@ CONFIG_GENERIC_ALLOCATOR=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
-CONFIG_GENERIC_CPUFREQ_KRAIT=y
CONFIG_GENERIC_IDLE_POLL_SETUP=y
CONFIG_GENERIC_IO=y
CONFIG_GENERIC_IRQ_SHOW=y
@@ -378,7 +378,7 @@ CONFIG_QCOM_BAM_DMA=y
CONFIG_QCOM_GDSC=y
CONFIG_QCOM_GSBI=y
CONFIG_QCOM_HFPLL=y
-# CONFIG_QCOM_PM is not set
+CONFIG_QCOM_PM=y
CONFIG_QCOM_QFPROM=y
CONFIG_QCOM_SCM=y
CONFIG_QCOM_SCM_32=y


@@ -1,461 +0,0 @@
From dd77db4143290689d3a5e1ec61627233d0711b66 Mon Sep 17 00:00:00 2001
From: Stephen Boyd <sboyd@codeaurora.org>
Date: Fri, 30 May 2014 16:36:11 -0700
Subject: [PATCH] FROMLIST: cpufreq: Add a cpufreq-krait based on cpufreq-cpu0
Krait processors have individual clocks for each CPU that can
scale independently from one another. cpufreq-cpu0 is fairly
close to this, but assumes that there is only one clock for all
CPUs. Add a driver to support the Krait configuration.
TODO: Merge into cpufreq-cpu0? Or make generic?
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
---
drivers/cpufreq/Kconfig | 13 +++
drivers/cpufreq/Makefile | 1 +
drivers/cpufreq/cpufreq-krait.c | 190 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 204 insertions(+)
create mode 100644 drivers/cpufreq/cpufreq-krait.c
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -198,6 +198,19 @@ config CPUFREQ_DT
If in doubt, say N.
+config GENERIC_CPUFREQ_KRAIT
+ tristate "Krait cpufreq driver"
+ depends on HAVE_CLK && OF
+ # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y:
+ depends on !CPU_THERMAL || THERMAL
+ select PM_OPP
+ help
+ This adds a generic cpufreq driver for CPU0 frequency management.
+ It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+ systems which share clock and voltage across all CPUs.
+
+ If in doubt, say N.
+
if X86
source "drivers/cpufreq/Kconfig.x86"
endif
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_GENERIC_CPUFREQ_KRAIT) += cpufreq-krait.o
##################################################################################
# x86 drivers.
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-krait.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * The OPP code in function krait_set_target() is reused from
+ * drivers/cpufreq/omap-cpufreq.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+static unsigned int transition_latency;
+static unsigned int voltage_tolerance; /* in percentage */
+
+static struct device *cpu_dev;
+static DEFINE_PER_CPU(struct clk *, krait_cpu_clks);
+static DEFINE_PER_CPU(struct regulator *, krait_supply_core);
+static struct cpufreq_frequency_table *freq_table;
+static struct thermal_cooling_device *cdev;
+
+struct cache_points {
+ unsigned long cache_freq;
+ unsigned int cache_volt;
+ unsigned long cpu_freq;
+};
+
+static struct regulator *krait_l2_reg;
+static struct clk *krait_l2_clk;
+static struct cache_points *krait_l2_points;
+static int nr_krait_l2_points;
+
+static int krait_parse_cache_points(struct device *dev,
+ struct device_node *of_node)
+{
+ const struct property *prop;
+ const __be32 *val;
+ int nr, i;
+
+ prop = of_find_property(of_node, "cache-points-kHz", NULL);
+ if (!prop)
+ return -ENODEV;
+ if (!prop->value)
+ return -ENODATA;
+
+ /*
+ * Each OPP is a set of tuples consisting of frequency and
+ * cpu-frequency like <freq-kHz volt-uV freq-kHz>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 3) {
+ dev_err(dev, "%s: Invalid cache points\n", __func__);
+ return -EINVAL;
+ }
+ nr /= 3;
+
+ krait_l2_points = devm_kcalloc(dev, nr, sizeof(*krait_l2_points),
+ GFP_KERNEL);
+ if (!krait_l2_points)
+ return -ENOMEM;
+ nr_krait_l2_points = nr;
+
+ for (i = 0, val = prop->value; i < nr; i++) {
+ unsigned long cache_freq = be32_to_cpup(val++) * 1000;
+ unsigned int cache_volt = be32_to_cpup(val++);
+ unsigned long cpu_freq = be32_to_cpup(val++) * 1000;
+
+ krait_l2_points[i].cache_freq = cache_freq;
+ krait_l2_points[i].cache_volt = cache_volt;
+ krait_l2_points[i].cpu_freq = cpu_freq;
+ }
+
+ return 0;
+}
+
+static int krait_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct dev_pm_opp *opp;
+ unsigned long volt = 0, volt_old = 0, tol = 0;
+ unsigned long freq, max_cpu_freq = 0;
+ unsigned int old_freq, new_freq;
+ long freq_Hz, freq_exact;
+ int ret, i;
+ struct clk *cpu_clk;
+ struct regulator *core;
+ unsigned int cpu;
+
+ cpu_clk = per_cpu(krait_cpu_clks, policy->cpu);
+
+ freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
+ if (freq_Hz <= 0)
+ freq_Hz = freq_table[index].frequency * 1000;
+
+ freq_exact = freq_Hz;
+ new_freq = freq_Hz / 1000;
+ old_freq = clk_get_rate(cpu_clk) / 1000;
+
+ core = per_cpu(krait_supply_core, policy->cpu);
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ pr_err("failed to find OPP for %ld\n", freq_Hz);
+ return PTR_ERR(opp);
+ }
+ volt = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+ tol = volt * voltage_tolerance / 100;
+ volt_old = regulator_get_voltage(core);
+
+ pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+ new_freq / 1000, volt ? volt / 1000 : -1);
+
+ /* scaling up? scale voltage before frequency */
+ if (new_freq > old_freq) {
+ ret = regulator_set_voltage_tol(core, volt, tol);
+ if (ret) {
+ pr_err("failed to scale voltage up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = clk_set_rate(cpu_clk, freq_exact);
+ if (ret) {
+ pr_err("failed to set clock rate: %d\n", ret);
+ return ret;
+ }
+
+ /* scaling down? scale voltage after frequency */
+ if (new_freq < old_freq) {
+ ret = regulator_set_voltage_tol(core, volt, tol);
+ if (ret) {
+ pr_err("failed to scale voltage down: %d\n", ret);
+ clk_set_rate(cpu_clk, old_freq * 1000);
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ freq = clk_get_rate(per_cpu(krait_cpu_clks, cpu));
+ max_cpu_freq = max(max_cpu_freq, freq);
+ }
+
+ for (i = 0; i < nr_krait_l2_points; i++) {
+ if (max_cpu_freq >= krait_l2_points[i].cpu_freq) {
+ if (krait_l2_reg) {
+ ret = regulator_set_voltage_tol(krait_l2_reg,
+ krait_l2_points[i].cache_volt,
+ tol);
+ if (ret) {
+ pr_err("failed to scale l2 voltage: %d\n",
+ ret);
+ }
+ }
+ ret = clk_set_rate(krait_l2_clk,
+ krait_l2_points[i].cache_freq);
+ if (ret)
+ pr_err("failed to scale l2 clk: %d\n", ret);
+ break;
+ }
+
+ }
+
+ return ret;
+}
+
+static int krait_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ policy->clk = per_cpu(krait_cpu_clks, policy->cpu);
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+
+ return 0;
+}
+
+static struct cpufreq_driver krait_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = krait_set_target,
+ .get = cpufreq_generic_get,
+ .init = krait_cpufreq_init,
+ .name = "generic_krait",
+ .attr = cpufreq_generic_attr,
+};
+
+static int krait_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *cache;
+ int ret, i;
+ unsigned int cpu;
+ struct device *dev;
+ struct clk *clk;
+ struct regulator *core;
+ unsigned long freq_Hz, freq, max_cpu_freq = 0;
+ struct dev_pm_opp *opp;
+ unsigned long volt, tol;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get krait device\n");
+ return -ENODEV;
+ }
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_err("failed to find krait node\n");
+ return -ENOENT;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ pr_err("failed to init cpufreq table: %d\n", ret);
+ goto out_put_node;
+ }
+
+ of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ cache = of_find_next_cache_node(np);
+ if (cache) {
+ struct device_node *vdd;
+
+ vdd = of_parse_phandle(cache, "vdd_dig-supply", 0);
+ if (vdd) {
+ krait_l2_reg = regulator_get(NULL, vdd->name);
+ if (IS_ERR(krait_l2_reg)) {
+ pr_warn("failed to get l2 vdd_dig supply\n");
+ krait_l2_reg = NULL;
+ }
+ of_node_put(vdd);
+ }
+
+ krait_l2_clk = of_clk_get(cache, 0);
+ if (!IS_ERR(krait_l2_clk)) {
+ ret = krait_parse_cache_points(&pdev->dev, cache);
+ if (ret)
+ clk_put(krait_l2_clk);
+ }
+ if (IS_ERR(krait_l2_clk) || ret)
+ krait_l2_clk = NULL;
+ }
+
+ for_each_possible_cpu(cpu) {
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ pr_err("failed to get krait device\n");
+ ret = -ENOENT;
+ goto out_free_table;
+ }
+ per_cpu(krait_cpu_clks, cpu) = clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto out_free_table;
+ }
+ core = devm_regulator_get(dev, "core");
+ if (IS_ERR(core)) {
+ pr_debug("failed to get core regulator\n");
+ ret = PTR_ERR(core);
+ goto out_free_table;
+ }
+ per_cpu(krait_supply_core, cpu) = core;
+
+ freq = freq_Hz = clk_get_rate(clk);
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ pr_err("failed to find OPP for %ld\n", freq_Hz);
+ ret = PTR_ERR(opp);
+ goto out_free_table;
+ }
+ volt = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
+ tol = volt * voltage_tolerance / 100;
+ ret = regulator_set_voltage_tol(core, volt, tol);
+ if (ret) {
+ pr_err("failed to scale voltage up: %d\n", ret);
+ goto out_free_table;
+ }
+ ret = regulator_enable(core);
+ if (ret) {
+ pr_err("failed to enable regulator: %d\n", ret);
+ goto out_free_table;
+ }
+ max_cpu_freq = max(max_cpu_freq, freq);
+ }
+
+ for (i = 0; i < nr_krait_l2_points; i++) {
+ if (max_cpu_freq >= krait_l2_points[i].cpu_freq) {
+ if (krait_l2_reg) {
+ ret = regulator_set_voltage_tol(krait_l2_reg,
+ krait_l2_points[i].cache_volt,
+ tol);
+ if (ret)
+ pr_err("failed to scale l2 voltage: %d\n",
+ ret);
+ ret = regulator_enable(krait_l2_reg);
+ if (ret)
+ pr_err("failed to enable l2 voltage: %d\n",
+ ret);
+ }
+ break;
+ }
+
+ }
+
+ ret = cpufreq_register_driver(&krait_cpufreq_driver);
+ if (ret) {
+ pr_err("failed register driver: %d\n", ret);
+ goto out_free_table;
+ }
+ of_node_put(np);
+
+ /*
+ * For now, just loading the cooling device;
+ * thermal DT code takes care of matching them.
+ */
+ for_each_possible_cpu(cpu) {
+ dev = get_cpu_device(cpu);
+ np = of_node_get(dev->of_node);
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ cdev = of_cpufreq_cooling_register(np, cpumask_of(cpu));
+ if (IS_ERR(cdev))
+ pr_err("running cpufreq without cooling device: %ld\n",
+ PTR_ERR(cdev));
+ }
+ of_node_put(np);
+ }
+
+ return 0;
+
+out_free_table:
+ regulator_put(krait_l2_reg);
+ clk_put(krait_l2_clk);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int krait_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_cooling_unregister(cdev);
+ cpufreq_unregister_driver(&krait_cpufreq_driver);
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+ clk_put(krait_l2_clk);
+ regulator_put(krait_l2_reg);
+
+ return 0;
+}
+
+static struct platform_driver krait_cpufreq_platdrv = {
+ .driver = {
+ .name = "cpufreq-krait",
+ .owner = THIS_MODULE,
+ },
+ .probe = krait_cpufreq_probe,
+ .remove = krait_cpufreq_remove,
+};
+module_platform_driver(krait_cpufreq_platdrv);
+
+MODULE_DESCRIPTION("Krait CPUfreq driver");
+MODULE_LICENSE("GPL v2");
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -168,11 +168,8 @@ static int __init qcom_cpufreq_populate_
static int __init qcom_cpufreq_driver_init(void)
{
- struct cpufreq_dt_platform_data pdata = { .independent_clocks = true };
struct platform_device_info devinfo = {
- .name = "cpufreq-dt",
- .data = &pdata,
- .size_data = sizeof(pdata),
+ .name = "cpufreq-krait",
};
struct device *cpu_dev;
struct device_node *np;
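
With the out-of-tree Krait driver dropped, qcom-cpufreq falls back to its upstream behaviour of registering a "cpufreq-dt" platform device with independent per-CPU clocks, which the generic driver then binds to; that is exactly what the removed hunk above restores. A minimal sketch of that registration pattern, written as a standalone example module rather than the actual qcom-cpufreq code:

#include <linux/cpufreq-dt.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_cpufreq_dt_register(void)
{
	/* Each Krait core has its own clock and supply, so tell
	 * cpufreq-dt that the CPU clocks scale independently.
	 */
	struct cpufreq_dt_platform_data pdata = {
		.independent_clocks = true,
	};
	struct platform_device *pdev;

	/* cpufreq-dt probes against this device and builds its
	 * frequency table from the CPU node's OPPs.
	 */
	pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
					     &pdata, sizeof(pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
module_init(example_cpufreq_dt_register);
MODULE_LICENSE("GPL v2");

The real registration lives in drivers/cpufreq/qcom-cpufreq.c, as the hunk above shows; this snippet only illustrates the pattern.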


@@ -0,0 +1,184 @@
From 175329015c8a0b480240da222822d2f8316f074d Mon Sep 17 00:00:00 2001
From: Stephen Boyd <sboyd@codeaurora.org>
Date: Mon, 1 Jun 2015 18:47:58 -0700
Subject: cpufreq-dt: Handle OPP voltage adjust events
On some SoCs the Adaptive Voltage Scaling (AVS) technique is
employed to optimize the operating voltage of a device. At a
given frequency, the hardware monitors dynamic factors and either
makes a suggestion for how much to adjust a voltage for the
current frequency, or it automatically adjusts the voltage
without software intervention.
In the former case, an AVS driver will call
dev_pm_opp_modify_voltage() and update the voltage for the
particular OPP the CPUs are using. Add an OPP notifier to
cpufreq-dt so that we can adjust the voltage of the CPU when AVS
updates the OPP.
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
---
drivers/cpufreq/cpufreq-dt.c | 72 ++++++++++++++++++++++++++++++++++++++++----
1 file changed, 66 insertions(+), 6 deletions(-)
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -34,6 +34,9 @@ struct private_data {
struct regulator *cpu_reg;
struct thermal_cooling_device *cdev;
unsigned int voltage_tolerance; /* in percentage */
+ struct notifier_block opp_nb;
+ struct mutex lock;
+ unsigned long opp_freq;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -42,6 +45,42 @@ static struct freq_attr *cpufreq_dt_attr
NULL,
};
+static int opp_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct private_data *priv = container_of(nb, struct private_data,
+ opp_nb);
+ struct device *cpu_dev = priv->cpu_dev;
+ struct regulator *cpu_reg = priv->cpu_reg;
+ unsigned long volt, tol, freq;
+ int ret = 0;
+
+ switch (event) {
+ case OPP_EVENT_ADJUST_VOLTAGE:
+ volt = dev_pm_opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ tol = volt * priv->voltage_tolerance / 100;
+
+ mutex_lock(&priv->lock);
+ if (freq == priv->opp_freq)
+ ret = regulator_set_voltage_tol(cpu_reg, volt,
+ tol);
+ mutex_unlock(&priv->lock);
+ if (ret) {
+ dev_err(cpu_dev,
+ "failed to scale voltage up: %d\n",
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
@@ -53,6 +92,7 @@ static int set_target(struct cpufreq_pol
unsigned long volt = 0, volt_old = 0, tol = 0;
unsigned int old_freq, new_freq;
long freq_Hz, freq_exact;
+ unsigned long opp_freq = 0;
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
@@ -63,8 +103,8 @@ static int set_target(struct cpufreq_pol
new_freq = freq_Hz / 1000;
old_freq = clk_get_rate(cpu_clk) / 1000;
+ mutex_lock(&priv->lock);
if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq;
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
@@ -72,7 +112,8 @@ static int set_target(struct cpufreq_pol
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n",
freq_Hz);
- return PTR_ERR(opp);
+ ret = PTR_ERR(opp);
+ goto out;
}
volt = dev_pm_opp_get_voltage(opp);
opp_freq = dev_pm_opp_get_freq(opp);
@@ -93,7 +134,7 @@ static int set_target(struct cpufreq_pol
if (ret) {
dev_err(cpu_dev, "failed to scale voltage up: %d\n",
ret);
- return ret;
+ goto out;
}
}
@@ -102,7 +143,7 @@ static int set_target(struct cpufreq_pol
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg) && volt_old > 0)
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
+ goto out;
}
/* scaling down? scale voltage after frequency */
@@ -112,9 +153,12 @@ static int set_target(struct cpufreq_pol
dev_err(cpu_dev, "failed to scale voltage down: %d\n",
ret);
clk_set_rate(cpu_clk, old_freq * 1000);
+ goto out;
}
}
-
+ priv->opp_freq = opp_freq;
+out:
+ mutex_unlock(&priv->lock);
return ret;
}
@@ -201,6 +245,7 @@ static int cpufreq_init(struct cpufreq_p
unsigned int transition_latency;
bool need_update = false;
int ret;
+ struct srcu_notifier_head *opp_srcu_head;
ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
if (ret) {
@@ -277,6 +322,19 @@ static int cpufreq_init(struct cpufreq_p
goto out_free_opp;
}
+ mutex_init(&priv->lock);
+
+ opp_srcu_head = dev_pm_opp_get_notifier(cpu_dev);
+ if (IS_ERR(opp_srcu_head)) {
+ ret = PTR_ERR(opp_srcu_head);
+ goto out_free_priv;
+ }
+
+ priv->opp_nb.notifier_call = opp_notifier;
+ ret = srcu_notifier_chain_register(opp_srcu_head, &priv->opp_nb);
+ if (ret)
+ goto out_free_priv;
+
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
if (!transition_latency)
@@ -326,7 +384,7 @@ static int cpufreq_init(struct cpufreq_p
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_unregister_nb;
}
priv->cpu_dev = cpu_dev;
@@ -365,6 +423,8 @@ static int cpufreq_init(struct cpufreq_p
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_unregister_nb:
+ srcu_notifier_chain_unregister(opp_srcu_head, &priv->opp_nb);
out_free_priv:
kfree(priv);
out_free_opp:
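
For context on how the notifier above gets exercised: the patch description says an AVS driver calls dev_pm_opp_modify_voltage() when it wants a different voltage for the OPP the CPUs are running at. Below is a hedged sketch of that caller side; dev_pm_opp_modify_voltage() belongs to the same out-of-tree OPP series, and the (dev, freq, u_volt) signature used here is an assumption, not a confirmed API.

#include <linux/device.h>
#include <linux/pm_opp.h>

/* Sketch only: push a new voltage for the OPP at freq_hz. The OPP core
 * then emits OPP_EVENT_ADJUST_VOLTAGE, and the opp_notifier() added in
 * the patch above re-applies the voltage via regulator_set_voltage_tol()
 * if the CPU is currently running at that frequency.
 */
static int example_avs_adjust(struct device *cpu_dev,
			      unsigned long freq_hz, unsigned long new_uv)
{
	int ret;

	/* Assumed signature, taken from the out-of-tree series. */
	ret = dev_pm_opp_modify_voltage(cpu_dev, freq_hz, new_uv);
	if (ret)
		dev_err(cpu_dev, "failed to adjust OPP voltage: %d\n", ret);

	return ret;
}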


@@ -0,0 +1,161 @@
From b4629f9e30e865402c643de6d4668be790fc0539 Mon Sep 17 00:00:00 2001
From: Georgi Djakov <georgi.djakov@linaro.org>
Date: Tue, 8 Sep 2015 11:24:41 +0300
Subject: cpufreq-dt: Add L2 frequency scaling support
Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
Conflicts:
drivers/cpufreq/cpufreq-dt.c
---
drivers/cpufreq/cpufreq-dt.c | 54 ++++++++++++++++++++++++++++++++++++++------
include/linux/cpufreq.h | 2 ++
2 files changed, 49 insertions(+), 7 deletions(-)
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -86,11 +86,13 @@ static int set_target(struct cpufreq_pol
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = policy->freq_table;
struct clk *cpu_clk = policy->clk;
+ struct clk *l2_clk = policy->l2_clk;
struct private_data *priv = policy->driver_data;
struct device *cpu_dev = priv->cpu_dev;
struct regulator *cpu_reg = priv->cpu_reg;
unsigned long volt = 0, volt_old = 0, tol = 0;
- unsigned int old_freq, new_freq;
+ unsigned int old_freq, new_freq, l2_freq;
+ unsigned long new_l2_freq = 0;
long freq_Hz, freq_exact;
unsigned long opp_freq = 0;
int ret;
@@ -146,6 +148,30 @@ static int set_target(struct cpufreq_pol
goto out;
}
+ if (!IS_ERR(l2_clk) && policy->l2_rate[0] && policy->l2_rate[1] &&
+ policy->l2_rate[2]) {
+ static unsigned long krait_l2[CONFIG_NR_CPUS] = { };
+ int cpu, ret = 0;
+
+ if (freq_exact >= policy->l2_rate[2])
+ new_l2_freq = policy->l2_rate[2];
+ else if (freq_exact >= policy->l2_rate[1])
+ new_l2_freq = policy->l2_rate[1];
+ else
+ new_l2_freq = policy->l2_rate[0];
+
+ krait_l2[policy->cpu] = new_l2_freq;
+ for_each_present_cpu(cpu)
+ new_l2_freq = max(new_l2_freq, krait_l2[cpu]);
+
+ l2_freq = clk_get_rate(l2_clk);
+
+ if (l2_freq != new_l2_freq) {
+ /* scale l2 with the core */
+ ret = clk_set_rate(l2_clk, new_l2_freq);
+ }
+ }
+
/* scaling down? scale voltage after frequency */
if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
@@ -156,18 +182,21 @@ static int set_target(struct cpufreq_pol
goto out;
}
}
+
priv->opp_freq = opp_freq;
+
out:
mutex_unlock(&priv->lock);
return ret;
}
static int allocate_resources(int cpu, struct device **cdev,
- struct regulator **creg, struct clk **cclk)
+ struct regulator **creg, struct clk **cclk,
+ struct clk **l2)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
+ struct clk *cpu_clk, *l2_clk = NULL;
int ret = 0;
char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
@@ -227,6 +256,10 @@ try_again:
*cdev = cpu_dev;
*creg = cpu_reg;
*cclk = cpu_clk;
+
+ l2_clk = clk_get(cpu_dev, "l2");
+ if (!IS_ERR(l2_clk))
+ *l2 = l2_clk;
}
return ret;
@@ -236,18 +269,20 @@ static int cpufreq_init(struct cpufreq_p
{
struct cpufreq_frequency_table *freq_table;
struct device_node *np;
+ struct device_node *l2_np;
struct private_data *priv;
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
+ struct clk *cpu_clk, *l2_clk;
unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
bool need_update = false;
int ret;
struct srcu_notifier_head *opp_srcu_head;
- ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk,
+ &l2_clk);
if (ret) {
pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
return ret;
@@ -398,6 +433,11 @@ static int cpufreq_init(struct cpufreq_p
if (suspend_opp)
policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
rcu_read_unlock();
+ policy->l2_clk = l2_clk;
+
+ l2_np = of_find_node_by_name(NULL, "qcom,l2");
+ if (l2_np)
+ of_property_read_u32_array(l2_np, "qcom,l2-rates", policy->l2_rate, 3);
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
@@ -498,7 +538,7 @@ static int dt_cpufreq_probe(struct platf
{
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
+ struct clk *cpu_clk, *l2_clk;
int ret;
/*
@@ -508,7 +548,7 @@ static int dt_cpufreq_probe(struct platf
*
* FIXME: Is checking this only for CPU0 sufficient ?
*/
- ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk, &l2_clk);
if (ret)
return ret;
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -67,6 +67,8 @@ struct cpufreq_policy {
unsigned int cpu; /* cpu managing this policy, must be online */
struct clk *clk;
+ struct clk *l2_clk; /* L2 clock */
+ unsigned int l2_rate[3]; /* L2 bus clock rate thresholds */
struct cpufreq_cpuinfo cpuinfo;/* see above */
unsigned int min; /* in kHz */


@@ -0,0 +1,33 @@
From dafae9c5b39e2871bfd8db0b4bad6e850e42ef49 Mon Sep 17 00:00:00 2001
From: Georgi Djakov <georgi.djakov@linaro.org>
Date: Wed, 13 Jan 2016 15:10:25 +0200
Subject: cpufreq-dt: Add missing rcu_read_lock() for find_device_opp()
The function dev_pm_opp_get_notifier() must be called with held
rcu_read_lock. In order to keep the pointer valid, add rcu_read_lock().
Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
---
drivers/cpufreq/cpufreq-dt.c | 3 +++
1 file changed, 3 insertions(+)
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -359,14 +359,17 @@ static int cpufreq_init(struct cpufreq_p
mutex_init(&priv->lock);
+ rcu_read_lock();
opp_srcu_head = dev_pm_opp_get_notifier(cpu_dev);
if (IS_ERR(opp_srcu_head)) {
ret = PTR_ERR(opp_srcu_head);
+ rcu_read_unlock();
goto out_free_priv;
}
priv->opp_nb.notifier_call = opp_notifier;
ret = srcu_notifier_chain_register(opp_srcu_head, &priv->opp_nb);
+ rcu_read_unlock();
if (ret)
goto out_free_priv;