Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Makefile                       2
-rw-r--r--  drivers/base/core.c                        34
-rw-r--r--  drivers/base/memory.c                       8
-rw-r--r--  drivers/base/platform-msi.c                 6
-rw-r--r--  drivers/base/platform.c                    35
-rw-r--r--  drivers/base/power/domain.c               343
-rw-r--r--  drivers/base/power/runtime.c               63
-rw-r--r--  drivers/base/property.c                   513
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c      12
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c       12
-rw-r--r--  drivers/base/regmap/regmap-irq.c          142
-rw-r--r--  drivers/base/swnode.c                     675
12 files changed, 1064 insertions, 781 deletions
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 704f44295810..157452080f3d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- devcon.o
+ devcon.o swnode.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_ISA_BUS_API) += isa.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a4ced331bc50..0073b09bb99f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -8,6 +8,7 @@
* Copyright (c) 2006 Novell, Inc.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
@@ -728,6 +729,26 @@ static inline int device_is_not_partition(struct device *dev)
}
#endif
+static int
+device_platform_notify(struct device *dev, enum kobject_action action)
+{
+ int ret;
+
+ ret = acpi_platform_notify(dev, action);
+ if (ret)
+ return ret;
+
+ ret = software_node_notify(dev, action);
+ if (ret)
+ return ret;
+
+ if (platform_notify && action == KOBJ_ADD)
+ platform_notify(dev);
+ else if (platform_notify_remove && action == KOBJ_REMOVE)
+ platform_notify_remove(dev);
+ return 0;
+}
+
/**
* dev_driver_string - Return a device's driver name, if at all possible
* @dev: struct device to get the name of
@@ -1895,8 +1916,9 @@ int device_add(struct device *dev)
}
/* notify platform of device entry */
- if (platform_notify)
- platform_notify(dev);
+ error = device_platform_notify(dev, KOBJ_ADD);
+ if (error)
+ goto platform_error;
error = device_create_file(dev, &dev_attr_uevent);
if (error)
@@ -1972,6 +1994,8 @@ done:
SymlinkError:
device_remove_file(dev, &dev_attr_uevent);
attrError:
+ device_platform_notify(dev, KOBJ_REMOVE);
+platform_error:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
@@ -2089,14 +2113,10 @@ void device_del(struct device *dev)
bus_remove_device(dev);
device_pm_remove(dev);
driver_deferred_probe_del(dev);
+ device_platform_notify(dev, KOBJ_REMOVE);
device_remove_properties(dev);
device_links_purge(dev);
- /* Notify the platform of the removal, in case they
- * need to do anything...
- */
- if (platform_notify_remove)
- platform_notify_remove(dev);
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_REMOVED_DEVICE, dev);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index ea35b3dc1442..048cbf7d5233 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -207,15 +207,15 @@ static bool pages_correctly_probed(unsigned long start_pfn)
return false;
if (!present_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) not present",
+ pr_warn("section %ld pfn[%lx, %lx) not present\n",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
} else if (!valid_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
+ pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
} else if (online_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) is already online",
+ pr_warn("section %ld pfn[%lx, %lx) is already online\n",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
}
@@ -681,7 +681,7 @@ static int add_memory_block(int base_section_nr)
int i, ret, section_count = 0, section_nr;
for (i = base_section_nr;
- (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+ i < base_section_nr + sections_per_block;
i++) {
if (!present_section_nr(i))
continue;
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index f39a920496fb..8da314b81eab 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -368,14 +368,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nvec)
{
struct platform_msi_priv_data *data = domain->host_data;
- struct msi_desc *desc;
- for_each_msi_entry(desc, data->dev) {
+ struct msi_desc *desc, *tmp;
+ for_each_msi_entry_safe(desc, tmp, data->dev) {
if (WARN_ON(!desc->irq || desc->nvec_used != 1))
return;
if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
continue;
irq_domain_free_irqs_common(domain, desc->irq, 1);
+ list_del(&desc->list);
+ free_msi_entry(desc);
}
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e1ba610482c0..be6c1eb3cbe2 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -448,7 +448,6 @@ void platform_device_del(struct platform_device *pdev)
int i;
if (!IS_ERR_OR_NULL(pdev)) {
- device_remove_properties(&pdev->dev);
device_del(&pdev->dev);
if (pdev->id_auto) {
@@ -1138,8 +1137,7 @@ int platform_dma_configure(struct device *dev)
ret = of_dma_configure(dev, dev->of_node, true);
} else if (has_acpi_companion(dev)) {
attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
- if (attr != DEV_DMA_NOT_SUPPORTED)
- ret = acpi_dma_configure(dev, attr);
+ ret = acpi_dma_configure(dev, attr);
}
return ret;
@@ -1179,37 +1177,6 @@ int __init platform_bus_init(void)
return error;
}
-#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
-static u64 dma_default_get_required_mask(struct device *dev)
-{
- u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
- u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
- u64 mask;
-
- if (!high_totalram) {
- /* convert to mask just covering totalram */
- low_totalram = (1 << (fls(low_totalram) - 1));
- low_totalram += low_totalram - 1;
- mask = low_totalram;
- } else {
- high_totalram = (1 << (fls(high_totalram) - 1));
- high_totalram += high_totalram - 1;
- mask = (((u64)high_totalram) << 32) + 0xffffffff;
- }
- return mask;
-}
-
-u64 dma_get_required_mask(struct device *dev)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops->get_required_mask)
- return ops->get_required_mask(dev);
- return dma_default_get_required_mask(dev);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-#endif
-
static __initdata LIST_HEAD(early_platform_driver_list);
static __initdata LIST_HEAD(early_platform_device_list);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 7f38a92b444a..500de1dee967 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct generic_pm_domain_data *pd_data;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ /* New requested state is same as Max requested state */
+ if (state == genpd->performance_state)
+ return state;
+
+ /* New requested state is higher than Max requested state */
+ if (state > genpd->performance_state)
+ return state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ /*
+ * Traverse all sub-domains within the domain. This can be
+ * done without any additional locking as the link->performance_state
+ * field is protected by the master genpd->lock, which is already taken.
+ *
+ * Also note that link->performance_state (subdomain's performance state
+ * requirement to master domain) is different from
+ * link->slave->performance_state (current performance state requirement
+ * of the devices/sub-domains of the subdomain) and so can have a
+ * different value.
+ *
+ * Note that we also take vote from powered-off sub-domains into account
+ * as the same is done for devices right now.
+ */
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ if (link->performance_state > state)
+ state = link->performance_state;
+ }
+
+ return state;
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state, int depth)
+{
+ struct generic_pm_domain *master;
+ struct gpd_link *link;
+ int master_state, ret;
+
+ if (state == genpd->performance_state)
+ return 0;
+
+ /* Propagate to masters of genpd */
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ master = link->master;
+
+ if (!master->set_performance_state)
+ continue;
+
+ /* Find master's performance state */
+ ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ master->opp_table,
+ state);
+ if (unlikely(ret < 0))
+ goto err;
+
+ master_state = ret;
+
+ genpd_lock_nested(master, depth + 1);
+
+ link->prev_performance_state = link->performance_state;
+ link->performance_state = master_state;
+ master_state = _genpd_reeval_performance_state(master,
+ master_state);
+ ret = _genpd_set_performance_state(master, master_state, depth + 1);
+ if (ret)
+ link->performance_state = link->prev_performance_state;
+
+ genpd_unlock(master);
+
+ if (ret)
+ goto err;
+ }
+
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+
+ genpd->performance_state = state;
+ return 0;
+
+err:
+ /* Encountered an error, lets rollback */
+ list_for_each_entry_continue_reverse(link, &genpd->slave_links,
+ slave_node) {
+ master = link->master;
+
+ if (!master->set_performance_state)
+ continue;
+
+ genpd_lock_nested(master, depth + 1);
+
+ master_state = link->prev_performance_state;
+ link->performance_state = master_state;
+
+ master_state = _genpd_reeval_performance_state(master,
+ master_state);
+ if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+ pr_err("%s: Failed to roll back to %d performance state\n",
+ master->name, master_state);
+ }
+
+ genpd_unlock(master);
+ }
+
+ return ret;
+}
+
/**
* dev_pm_genpd_set_performance_state- Set performance state of device's power
* domain.
@@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
- struct generic_pm_domain_data *gpd_data, *pd_data;
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data;
unsigned int prev;
- int ret = 0;
+ int ret;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
@@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
prev = gpd_data->performance_state;
gpd_data->performance_state = state;
- /* New requested state is same as Max requested state */
- if (state == genpd->performance_state)
- goto unlock;
-
- /* New requested state is higher than Max requested state */
- if (state > genpd->performance_state)
- goto update_state;
-
- /* Traverse all devices within the domain */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- pd_data = to_gpd_data(pdd);
-
- if (pd_data->performance_state > state)
- state = pd_data->performance_state;
- }
-
- if (state == genpd->performance_state)
- goto unlock;
-
- /*
- * We aren't propagating performance state changes of a subdomain to its
- * masters as we don't have hardware that needs it. Over that, the
- * performance states of subdomain and its masters may not have
- * one-to-one mapping and would require additional information. We can
- * get back to this once we have hardware that needs it. For that
- * reason, we don't have to consider performance state of the subdomains
- * of genpd here.
- */
-
-update_state:
- if (genpd_status_on(genpd)) {
- ret = genpd->set_performance_state(genpd, state);
- if (ret) {
- gpd_data->performance_state = prev;
- goto unlock;
- }
- }
-
- genpd->performance_state = state;
+ state = _genpd_reeval_performance_state(genpd, state);
+ ret = _genpd_set_performance_state(genpd, state, 0);
+ if (ret)
+ gpd_data->performance_state = prev;
-unlock:
genpd_unlock(genpd);
return ret;
@@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-
- if (unlikely(genpd->set_performance_state)) {
- ret = genpd->set_performance_state(genpd, genpd->performance_state);
- if (ret) {
- pr_warn("%s: Failed to set performance state %d (%d)\n",
- genpd->name, genpd->performance_state, ret);
- }
- }
-
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np,
ret);
goto unlock;
}
+
+ /*
+ * Save table for faster processing while setting performance
+ * state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
+ WARN_ON(!genpd->opp_table);
}
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
- if (genpd->set_performance_state)
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
+ }
goto unlock;
}
@@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np,
i, ret);
goto error;
}
+
+ /*
+ * Save table for faster processing while setting
+ * performance state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
+ WARN_ON(!genpd->opp_table);
}
genpd->provider = &np->fwnode;
@@ -1989,8 +2080,10 @@ error:
genpd->provider = NULL;
genpd->has_provider = false;
- if (genpd->set_performance_state)
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
+ }
}
mutex_unlock(&gpd_list_lock);
@@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np)
if (!gpd->set_performance_state)
continue;
+ dev_pm_opp_put_opp_table(gpd->opp_table);
dev_pm_opp_of_remove_table(&gpd->dev);
}
}
@@ -2338,7 +2432,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index)
{
- struct device *genpd_dev;
+ struct device *virt_dev;
int num_domains;
int ret;
@@ -2352,31 +2446,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
return NULL;
/* Allocate and register device on the genpd bus. */
- genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL);
- if (!genpd_dev)
+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
+ if (!virt_dev)
return ERR_PTR(-ENOMEM);
- dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev));
- genpd_dev->bus = &genpd_bus_type;
- genpd_dev->release = genpd_release_dev;
+ dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
+ virt_dev->bus = &genpd_bus_type;
+ virt_dev->release = genpd_release_dev;
- ret = device_register(genpd_dev);
+ ret = device_register(virt_dev);
if (ret) {
- kfree(genpd_dev);
+ kfree(virt_dev);
return ERR_PTR(ret);
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
+ ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
if (ret < 1) {
- device_unregister(genpd_dev);
+ device_unregister(virt_dev);
return ret ? ERR_PTR(ret) : NULL;
}
- pm_runtime_enable(genpd_dev);
- genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
+ pm_runtime_enable(virt_dev);
+ genpd_queue_power_off_work(dev_to_genpd(virt_dev));
- return genpd_dev;
+ return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
@@ -2521,52 +2615,36 @@ int of_genpd_parse_idle_states(struct device_node *dn,
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
/**
- * of_genpd_opp_to_performance_state- Gets performance state of device's
- * power domain corresponding to a DT node's "required-opps" property.
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
*
- * @dev: Device for which the performance-state needs to be found.
- * @np: DT node where the "required-opps" property is present. This can be
- * the device node itself (if it doesn't have an OPP table) or a node
- * within the OPP table of a device (if device has an OPP table).
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
+ * state.
*
- * Returns performance state corresponding to the "required-opps" property of
- * a DT node. This calls platform specific genpd->opp_to_performance_state()
- * callback to translate power domain OPP to performance state.
+ * Returns performance state encoded in the OPP of the genpd. This calls
+ * platform specific genpd->opp_to_performance_state() callback to translate
+ * power domain OPP to performance state.
*
* Returns performance state on success and 0 on failure.
*/
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np)
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
{
- struct generic_pm_domain *genpd;
- struct dev_pm_opp *opp;
- int state = 0;
+ struct generic_pm_domain *genpd = NULL;
+ int state;
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return 0;
+ genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
- if (unlikely(!genpd->set_performance_state))
+ if (unlikely(!genpd->opp_to_performance_state))
return 0;
genpd_lock(genpd);
-
- opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
- if (IS_ERR(opp)) {
- dev_err(dev, "Failed to find required OPP: %ld\n",
- PTR_ERR(opp));
- goto unlock;
- }
-
state = genpd->opp_to_performance_state(genpd, opp);
- dev_pm_opp_put(opp);
-
-unlock:
genpd_unlock(genpd);
return state;
}
-EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
+EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
static int __init genpd_bus_init(void)
{
@@ -2671,7 +2749,7 @@ exit:
return 0;
}
-static int genpd_summary_show(struct seq_file *s, void *data)
+static int summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -2694,7 +2772,7 @@ static int genpd_summary_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_status_show(struct seq_file *s, void *data)
+static int status_show(struct seq_file *s, void *data)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2721,7 +2799,7 @@ exit:
return ret;
}
-static int genpd_sub_domains_show(struct seq_file *s, void *data)
+static int sub_domains_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct gpd_link *link;
@@ -2738,7 +2816,7 @@ static int genpd_sub_domains_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_idle_states_show(struct seq_file *s, void *data)
+static int idle_states_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
unsigned int i;
@@ -2767,7 +2845,7 @@ static int genpd_idle_states_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_active_time_show(struct seq_file *s, void *data)
+static int active_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0;
@@ -2787,7 +2865,7 @@ static int genpd_active_time_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+static int total_idle_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0, total = 0;
@@ -2815,7 +2893,7 @@ static int genpd_total_idle_time_show(struct seq_file *s, void *data)
}
-static int genpd_devices_show(struct seq_file *s, void *data)
+static int devices_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct pm_domain_data *pm_data;
@@ -2841,7 +2919,7 @@ static int genpd_devices_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_perf_state_show(struct seq_file *s, void *data)
+static int perf_state_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
@@ -2854,37 +2932,14 @@ static int genpd_perf_state_show(struct seq_file *s, void *data)
return 0;
}
-#define define_genpd_open_function(name) \
-static int genpd_##name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, genpd_##name##_show, inode->i_private); \
-}
-
-define_genpd_open_function(summary);
-define_genpd_open_function(status);
-define_genpd_open_function(sub_domains);
-define_genpd_open_function(idle_states);
-define_genpd_open_function(active_time);
-define_genpd_open_function(total_idle_time);
-define_genpd_open_function(devices);
-define_genpd_open_function(perf_state);
-
-#define define_genpd_debugfs_fops(name) \
-static const struct file_operations genpd_##name##_fops = { \
- .open = genpd_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-}
-
-define_genpd_debugfs_fops(summary);
-define_genpd_debugfs_fops(status);
-define_genpd_debugfs_fops(sub_domains);
-define_genpd_debugfs_fops(idle_states);
-define_genpd_debugfs_fops(active_time);
-define_genpd_debugfs_fops(total_idle_time);
-define_genpd_debugfs_fops(devices);
-define_genpd_debugfs_fops(perf_state);
+DEFINE_SHOW_ATTRIBUTE(summary);
+DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(sub_domains);
+DEFINE_SHOW_ATTRIBUTE(idle_states);
+DEFINE_SHOW_ATTRIBUTE(active_time);
+DEFINE_SHOW_ATTRIBUTE(total_idle_time);
+DEFINE_SHOW_ATTRIBUTE(devices);
+DEFINE_SHOW_ATTRIBUTE(perf_state);
static int __init genpd_debug_init(void)
{
@@ -2897,7 +2952,7 @@ static int __init genpd_debug_init(void)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- genpd_debugfs_dir, NULL, &genpd_summary_fops);
+ genpd_debugfs_dir, NULL, &summary_fops);
if (!d)
return -ENOMEM;
@@ -2907,20 +2962,20 @@ static int __init genpd_debug_init(void)
return -ENOMEM;
debugfs_create_file("current_state", 0444,
- d, genpd, &genpd_status_fops);
+ d, genpd, &status_fops);
debugfs_create_file("sub_domains", 0444,
- d, genpd, &genpd_sub_domains_fops);
+ d, genpd, &sub_domains_fops);
debugfs_create_file("idle_states", 0444,
- d, genpd, &genpd_idle_states_fops);
+ d, genpd, &idle_states_fops);
debugfs_create_file("active_time", 0444,
- d, genpd, &genpd_active_time_fops);
+ d, genpd, &active_time_fops);
debugfs_create_file("total_idle_time", 0444,
- d, genpd, &genpd_total_idle_time_fops);
+ d, genpd, &total_idle_time_fops);
debugfs_create_file("devices", 0444,
- d, genpd, &genpd_devices_fops);
+ d, genpd, &devices_fops);
if (genpd->set_performance_state)
debugfs_create_file("perf_state", 0444,
- d, genpd, &genpd_perf_state_fops);
+ d, genpd, &perf_state_fops);
}
return 0;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index beb85c31f3fa..70624695b6d5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -8,6 +8,8 @@
*/
#include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -93,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
- del_timer(&dev->power.suspend_timer);
+ hrtimer_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
@@ -124,12 +126,11 @@ static void pm_runtime_cancel_pending(struct device *dev)
* This function may be called either with or without dev->power.lock held.
* Either way it can be racy, since power.last_busy may be updated at any time.
*/
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
- long elapsed;
- unsigned long last_busy;
- unsigned long expires = 0;
+ u64 last_busy, expires = 0;
+ u64 now = ktime_to_ns(ktime_get());
if (!dev->power.use_autosuspend)
goto out;
@@ -139,19 +140,9 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
goto out;
last_busy = READ_ONCE(dev->power.last_busy);
- elapsed = jiffies - last_busy;
- if (elapsed < 0)
- goto out; /* jiffies has wrapped around. */
- /*
- * If the autosuspend_delay is >= 1 second, align the timer by rounding
- * up to the nearest second.
- */
- expires = last_busy + msecs_to_jiffies(autosuspend_delay);
- if (autosuspend_delay >= 1000)
- expires = round_jiffies(expires);
- expires += !expires;
- if (elapsed >= expires - last_busy)
+ expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
+ if (expires <= now)
expires = 0; /* Already expired. */
out:
@@ -515,7 +506,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
/* If the autosuspend_delay time hasn't expired yet, reschedule. */
if ((rpmflags & RPM_AUTO)
&& dev->power.runtime_status != RPM_SUSPENDING) {
- unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+ u64 expires = pm_runtime_autosuspend_expiration(dev);
if (expires != 0) {
/* Pending requests need to be canceled. */
@@ -528,10 +519,20 @@ static int rpm_suspend(struct device *dev, int rpmflags)
* expire; pm_suspend_timer_fn() will take care of the
* rest.
*/
- if (!(dev->power.timer_expires && time_before_eq(
- dev->power.timer_expires, expires))) {
+ if (!(dev->power.timer_expires &&
+ dev->power.timer_expires <= expires)) {
+ /*
+ * We add a slack of 25% to gather wakeups
+ * without sacrificing the granularity.
+ */
+ u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+ (NSEC_PER_MSEC >> 2);
+
dev->power.timer_expires = expires;
- mod_timer(&dev->power.suspend_timer, expires);
+ hrtimer_start_range_ns(&dev->power.suspend_timer,
+ ns_to_ktime(expires),
+ slack,
+ HRTIMER_MODE_ABS);
}
dev->power.timer_autosuspends = 1;
goto out;
@@ -895,23 +896,25 @@ static void pm_runtime_work(struct work_struct *work)
*
* Check if the time is right and queue a suspend request.
*/
-static void pm_suspend_timer_fn(struct timer_list *t)
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
- struct device *dev = from_timer(dev, t, power.suspend_timer);
+ struct device *dev = container_of(timer, struct device, power.suspend_timer);
unsigned long flags;
- unsigned long expires;
+ u64 expires;
spin_lock_irqsave(&dev->power.lock, flags);
expires = dev->power.timer_expires;
/* If 'expire' is after 'jiffies' we've been called too early. */
- if (expires > 0 && !time_after(expires, jiffies)) {
+ if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
}
spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return HRTIMER_NORESTART;
}
/**
@@ -922,6 +925,7 @@ static void pm_suspend_timer_fn(struct timer_list *t)
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
+ ktime_t expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
@@ -938,10 +942,10 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
- dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
- dev->power.timer_expires += !dev->power.timer_expires;
+ expires = ktime_add(ktime_get(), ms_to_ktime(delay));
+ dev->power.timer_expires = ktime_to_ns(expires);
dev->power.timer_autosuspends = 0;
- mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+ hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1491,7 +1495,8 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+ hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ dev->power.suspend_timer.function = pm_suspend_timer_fn;
init_waitqueue_head(&dev->power.wait_queue);
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 240ab5230ff6..8b91ab380d14 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -18,236 +18,6 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
-struct property_set {
- struct device *dev;
- struct fwnode_handle fwnode;
- const struct property_entry *properties;
-};
-
-static const struct fwnode_operations pset_fwnode_ops;
-
-static inline bool is_pset_node(const struct fwnode_handle *fwnode)
-{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops;
-}
-
-#define to_pset_node(__fwnode) \
- ({ \
- typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \
- \
- is_pset_node(__to_pset_node_fwnode) ? \
- container_of(__to_pset_node_fwnode, \
- struct property_set, fwnode) : \
- NULL; \
- })
-
-static const struct property_entry *
-pset_prop_get(const struct property_set *pset, const char *name)
-{
- const struct property_entry *prop;
-
- if (!pset || !pset->properties)
- return NULL;
-
- for (prop = pset->properties; prop->name; prop++)
- if (!strcmp(name, prop->name))
- return prop;
-
- return NULL;
-}
-
-static const void *property_get_pointer(const struct property_entry *prop)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- return prop->pointer.u8_data;
- return &prop->value.u8_data;
- case DEV_PROP_U16:
- if (prop->is_array)
- return prop->pointer.u16_data;
- return &prop->value.u16_data;
- case DEV_PROP_U32:
- if (prop->is_array)
- return prop->pointer.u32_data;
- return &prop->value.u32_data;
- case DEV_PROP_U64:
- if (prop->is_array)
- return prop->pointer.u64_data;
- return &prop->value.u64_data;
- case DEV_PROP_STRING:
- if (prop->is_array)
- return prop->pointer.str;
- return &prop->value.str;
- default:
- return NULL;
- }
-}
-
-static void property_set_pointer(struct property_entry *prop, const void *pointer)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- prop->pointer.u8_data = pointer;
- else
- prop->value.u8_data = *((u8 *)pointer);
- break;
- case DEV_PROP_U16:
- if