// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "opp.h"

#ifdef CONFIG_CPU_FREQ
/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * OPP table is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if the table is successfully populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if the OPP table is modified (OPPs added or removed) in the
 * interim.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0)
		return max_opps ? max_opps : -ENODATA;

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000;

		/* Is this a boost/turbo OPP? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;

		dev_pm_opp_put(opp);
	}

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

out:
	if (ret)
		kfree(freq_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
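/*
 * Illustrative usage sketch (not part of this file): a cpufreq driver's
 * ->init() callback could build its frequency table from the device's OPPs
 * with the helper above. my_cpufreq_init() is a hypothetical name.
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *		struct cpufreq_frequency_table *freq_table;
 *		int ret;
 *
 *		if (!cpu_dev)
 *			return -ENODEV;
 *
 *		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 *		if (ret)
 *			return ret;
 *
 *		policy->freq_table = freq_table;
 *		return 0;
 *	}
 */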
/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
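/*
 * Illustrative counterpart to the sketch above (hypothetical names): the
 * driver's teardown path would release the table built during init.
 *
 *	static void my_cpufreq_exit(struct cpufreq_policy *policy)
 *	{
 *		dev_pm_opp_free_cpufreq_table(get_cpu_device(policy->cpu),
 *					      &policy->freq_table);
 *	}
 */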
#endif	/* CONFIG_CPU_FREQ */

/*
 * Remove the OPP tables of the CPUs in @cpumask, stopping (without removing)
 * at @last_cpu; pass -1 as @last_cpu to cover the whole mask.
 */
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask,
				      int last_cpu)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		if (cpu == last_cpu)
			break;

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		dev_pm_opp_remove_table(cpu_dev);
	}
}
/**
 * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used to remove all the OPP entries associated with
 * the CPUs in @cpumask.
 */
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
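/*
 * Illustrative usage sketch (hypothetical error path): a driver that
 * populated the tables with dev_pm_opp_of_cpumask_add_table() would undo
 * that on failure or removal with the helper above.
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	dev_pm_opp_cpumask_remove_table(policy->cpus);
 */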
/**
 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by some CPUs
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask of the CPUs which share the OPP table with @cpu_dev
 *
 * This marks the OPP table of @cpu_dev as shared by the CPUs present in
 * @cpumask.
 *
 * Returns -ENODEV if OPP table isn't already present.
 */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
				const struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	struct device *dev;
	int cpu, ret = 0;

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	for_each_cpu(cpu, cpumask) {
		if (cpu == cpu_dev->id)
			continue;

		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			continue;
		}

		opp_dev = _add_opp_dev(dev, opp_table);
		if (!opp_dev) {
			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
				__func__, cpu);
			continue;
		}

		/* Mark opp-table as multiple CPUs are sharing it now */
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	}

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
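/*
 * Illustrative usage sketch (hypothetical values): a platform that knows all
 * possible CPUs share the same clock/voltage rails can register OPPs for one
 * CPU and then mark the rest as sharing that table.
 *
 *	struct device *cpu_dev = get_cpu_device(0);
 *
 *	ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpu_possible_mask);
 */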
/**
 * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
 * table's status is access-unknown.
 */
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	int ret = 0;

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
		ret = -EINVAL;
		goto put_opp_table;
	}

	cpumask_clear(cpumask);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
		mutex_lock(&opp_table->lock);
		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
			cpumask_set_cpu(opp_dev->dev->id, cpumask);
		mutex_unlock(&opp_table->lock);
	} else {
		cpumask_set_cpu(cpu_dev->id, cpumask);
	}

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
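/*
 * Illustrative usage sketch (hypothetical driver code): a cpufreq driver can
 * use the helper above to populate policy->cpus with the CPUs that share the
 * OPP table of the policy CPU.
 *
 *	ret = dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
 *	if (ret)
 *		dev_warn(cpu_dev, "no sharing info, assuming independent CPU\n");
 */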