Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                      |  90
-rw-r--r--  drivers/base/Makefile                     |   3
-rw-r--r--  drivers/base/base.h                       |   3
-rw-r--r--  drivers/base/bus.c                        |  16
-rw-r--r--  drivers/base/cacheinfo.c                  | 157
-rw-r--r--  drivers/base/core.c                       |  53
-rw-r--r--  drivers/base/cpu.c                        |   8
-rw-r--r--  drivers/base/dd.c                         |  20
-rw-r--r--  drivers/base/devres.c                     |   7
-rw-r--r--  drivers/base/dma-coherent.c               | 433
-rw-r--r--  drivers/base/dma-contiguous.c             | 278
-rw-r--r--  drivers/base/dma-mapping.c                | 370
-rw-r--r--  drivers/base/driver.c                     |   6
-rw-r--r--  drivers/base/firmware_loader/Kconfig      | 154
-rw-r--r--  drivers/base/firmware_loader/fallback.c   |  57
-rw-r--r--  drivers/base/firmware_loader/fallback.h   |  20
-rw-r--r--  drivers/base/firmware_loader/firmware.h   |  37
-rw-r--r--  drivers/base/firmware_loader/main.c       |  57
-rw-r--r--  drivers/base/memory.c                     |   8
-rw-r--r--  drivers/base/node.c                       |   5
-rw-r--r--  drivers/base/platform-msi.c               |   3
-rw-r--r--  drivers/base/platform.c                   |  35
-rw-r--r--  drivers/base/power/common.c               |  53
-rw-r--r--  drivers/base/power/domain.c               | 337
-rw-r--r--  drivers/base/power/main.c                 |  44
-rw-r--r--  drivers/base/power/power.h                |  30
-rw-r--r--  drivers/base/power/runtime.c              |   2
-rw-r--r--  drivers/base/power/sysfs.c                |   2
-rw-r--r--  drivers/base/power/wakeup.c               |  96
-rw-r--r--  drivers/base/property.c                   | 104
-rw-r--r--  drivers/base/regmap/regmap-mmio.c         |   3
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c      |   2
32 files changed, 985 insertions(+), 1508 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 29b0eb452b3a..3e63a900b330 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -70,81 +70,25 @@ config STANDALONE
If unsure, say Y.
config PREVENT_FIRMWARE_BUILD
- bool "Prevent firmware from being built"
+ bool "Disable drivers features which enable custom firmware building"
default y
help
- Say yes to avoid building firmware. Firmware is usually shipped
- with the driver and only when updating the firmware should a
- rebuild be made.
- If unsure, say Y here.
-
-config FW_LOADER
- tristate "Userspace firmware loading support" if EXPERT
- default y
- ---help---
- This option is provided for the case where none of the in-tree modules
- require userspace firmware loading support, but a module built
- out-of-tree does.
-
-config EXTRA_FIRMWARE
- string "External firmware blobs to build into the kernel binary"
- depends on FW_LOADER
- help
- Various drivers in the kernel source tree may require firmware,
- which is generally available in your distribution's linux-firmware
- package.
-
- The linux-firmware package should install firmware into
- /lib/firmware/ on your system, so they can be loaded by userspace
- helpers on request.
-
- This option allows firmware to be built into the kernel for the case
- where the user either cannot or doesn't want to provide it from
- userspace at runtime (for example, when the firmware in question is
- required for accessing the boot device, and the user doesn't want to
- use an initrd).
-
- This option is a string and takes the (space-separated) names of the
- firmware files -- the same names that appear in MODULE_FIRMWARE()
- and request_firmware() in the source. These files should exist under
- the directory specified by the EXTRA_FIRMWARE_DIR option, which is
- /lib/firmware by default.
-
- For example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin", copy
- the usb8388.bin file into /lib/firmware, and build the kernel. Then
- any request_firmware("usb8388.bin") will be satisfied internally
- without needing to call out to userspace.
-
- WARNING: If you include additional firmware files into your binary
- kernel image that are not available under the terms of the GPL,
- then it may be a violation of the GPL to distribute the resulting
- image since it combines both GPL and non-GPL work. You should
- consult a lawyer of your own before distributing such an image.
-
-config EXTRA_FIRMWARE_DIR
- string "Firmware blobs root directory"
- depends on EXTRA_FIRMWARE != ""
- default "/lib/firmware"
- help
- This option controls the directory in which the kernel build system
- looks for the firmware files listed in the EXTRA_FIRMWARE option.
-
-config FW_LOADER_USER_HELPER
- bool
-
-config FW_LOADER_USER_HELPER_FALLBACK
- bool "Fallback user-helper invocation for firmware loading"
- depends on FW_LOADER
- select FW_LOADER_USER_HELPER
- help
- This option enables / disables the invocation of user-helper
- (e.g. udev) for loading firmware files as a fallback after the
- direct file loading in kernel fails. The user-mode helper is
- no longer required unless you have a special firmware file that
- resides in a non-standard path. Moreover, the udev support has
- been deprecated upstream.
-
- If you are unsure about this, say N here.
+ Say yes to disable driver features which allow building custom
+ driver firmware at kernel build time. These drivers do not use the
+ kernel firmware API (CONFIG_FW_LOADER) to load firmware; instead,
+ they use their own custom loading mechanism. The required firmware
+ is usually shipped with the driver; rebuilding it should only be
+ needed if you have an updated firmware source.
+
+ Firmware should not be built as part of the kernel these days,
+ so you should always say Y here. Only two old drivers still
+ support building their firmware at kernel build
+ time:
+
+ o CONFIG_WANXL through CONFIG_WANXL_BUILD_FIRMWARE
+ o CONFIG_SCSI_AIC79XX through CONFIG_AIC79XX_BUILD_FIRMWARE
+
+source "drivers/base/firmware_loader/Kconfig"
config WANT_DEV_COREDUMP
bool
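
The removed EXTRA_FIRMWARE help above refers to request_firmware() by name. For context, here is a minimal sketch of how a driver consumes firmware through the FW_LOADER API that now lives under drivers/base/firmware_loader/. It is not taken from any in-tree driver; the function name is a placeholder and "usb8388.bin" just reuses the example from the help text.

#include <linux/device.h>
#include <linux/firmware.h>

/*
 * Request a firmware image by name and hand it to the hardware. With
 * CONFIG_EXTRA_FIRMWARE="usb8388.bin" the request is satisfied from the
 * kernel image; otherwise the blob is read from /lib/firmware, with the
 * user-mode helper only as an optional fallback.
 */
static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "usb8388.bin", dev);
	if (ret)
		return ret;

	/* ... copy fw->data (fw->size bytes) to the device here ... */

	release_firmware(fw);
	return 0;
}
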
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b074f242a435..704f44295810 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -8,10 +8,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
topology.o container.o property.o cacheinfo.o \
devcon.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
-obj-$(CONFIG_HAS_DMA) += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_ISA_BUS_API) += isa.o
obj-y += firmware_loader/
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/base.h b/drivers/base/base.h
index d800de650fa5..a75c3025fb78 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -161,3 +161,6 @@ extern void device_links_driver_cleanup(struct device *dev);
extern void device_links_no_driver(struct device *dev);
extern bool device_links_busy(struct device *dev);
extern void device_links_unbind_consumers(struct device *dev);
+
+/* device pm support */
+void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index ef6183306b40..8bfd27ec73d6 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -184,10 +184,10 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == drv) {
- if (dev->parent) /* Needed for USB */
+ if (dev->parent && dev->bus->need_parent_lock)
device_lock(dev->parent);
device_release_driver(dev);
- if (dev->parent)
+ if (dev->parent && dev->bus->need_parent_lock)
device_unlock(dev->parent);
err = count;
}
@@ -211,12 +211,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver == NULL && driver_match_device(drv, dev)) {
- if (dev->parent) /* Needed for USB */
+ if (dev->parent && bus->need_parent_lock)
device_lock(dev->parent);
device_lock(dev);
err = driver_probe_device(drv, dev);
device_unlock(dev);
- if (dev->parent)
+ if (dev->parent && bus->need_parent_lock)
device_unlock(dev->parent);
if (err > 0) {
@@ -735,10 +735,10 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
int ret = 0;
if (!dev->driver) {
- if (dev->parent) /* Needed for USB */
+ if (dev->parent && dev->bus->need_parent_lock)
device_lock(dev->parent);
ret = device_attach(dev);
- if (dev->parent)
+ if (dev->parent && dev->bus->need_parent_lock)
device_unlock(dev->parent);
}
return ret < 0 ? ret : 0;
@@ -770,10 +770,10 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices);
int device_reprobe(struct device *dev)
{
if (dev->driver) {
- if (dev->parent) /* Needed for USB */
+ if (dev->parent && dev->bus->need_parent_lock)
device_lock(dev->parent);
device_release_driver(dev);
- if (dev->parent)
+ if (dev->parent && dev->bus->need_parent_lock)
device_unlock(dev->parent);
}
return bus_rescan_devices_helper(dev, NULL);
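
The bus.c hunks above replace the open-coded "Needed for USB" parent locking with a per-bus need_parent_lock flag. As a rough illustration (a hypothetical bus, not in-tree code), a bus that wants its parent devices locked around bind and unbind would opt in like this:

#include <linux/device.h>
#include <linux/string.h>

static int examplebus_match(struct device *dev, struct device_driver *drv)
{
	/* trivial name-based matching, for illustration only */
	return strcmp(dev_name(dev), drv->name) == 0;
}

static struct bus_type examplebus_bus_type = {
	.name			= "examplebus",
	.match			= examplebus_match,
	/*
	 * With need_parent_lock set, the driver core takes device_lock()
	 * on the parent around probe and release, which the removed
	 * "if (dev->parent)" checks used to do unconditionally.
	 */
	.need_parent_lock	= true,
};

USB is the bus the old comments were written for; with the flag in struct bus_type, only buses that ask for it pay the extra locking.
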
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index edf726267282..2880e2ab01f5 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -32,50 +32,10 @@ struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
}
#ifdef CONFIG_OF
-static int cache_setup_of_node(unsigned int cpu)
-{
- struct device_node *np;
- struct cacheinfo *this_leaf;
- struct device *cpu_dev = get_cpu_device(cpu);
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
- unsigned int index = 0;
-
- /* skip if of_node is already populated */
- if (this_cpu_ci->info_list->of_node)
- return 0;
-
- if (!cpu_dev) {
- pr_err("No cpu device for CPU %d\n", cpu);
- return -ENODEV;
- }
- np = cpu_dev->of_node;
- if (!np) {
- pr_err("Failed to find cpu%d device node\n", cpu);
- return -ENOENT;
- }
-
- while (index < cache_leaves(cpu)) {
- this_leaf = this_cpu_ci->info_list + index;
- if (this_leaf->level != 1)
- np = of_find_next_cache_node(np);
- else
- np = of_node_get(np);/* cpu node itself */
- if (!np)
- break;
- this_leaf->of_node = np;
- index++;
- }
-
- if (index != cache_leaves(cpu)) /* not all OF nodes populated */
- return -ENOENT;
-
- return 0;
-}
-
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
- return sib_leaf->of_node == this_leaf->of_node;
+ return sib_leaf->fw_token == this_leaf->fw_token;
}
/* OF properties to query for a given cache type */
@@ -111,7 +71,7 @@ static inline int get_cacheinfo_idx(enum cache_type type)
return type;
}
-static void cache_size(struct cacheinfo *this_leaf)
+static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *cache_size;
@@ -120,13 +80,14 @@ static void cache_size(struct cacheinfo *this_leaf)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
- cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ cache_size = of_get_property(np, propname, NULL);
if (cache_size)
this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
-static void cache_get_line_size(struct cacheinfo *this_leaf)
+static void cache_get_line_size(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
const __be32 *line_size;
int i, lim, ct_idx;
@@ -138,7 +99,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
- line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ line_size = of_get_property(np, propname, NULL);
if (line_size)
break;
}
@@ -147,7 +108,7 @@ static void cache_get_line_size(struct cacheinfo *this_leaf)
this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
-static void cache_nr_sets(struct cacheinfo *this_leaf)
+static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
const char *propname;
const __be32 *nr_sets;
@@ -156,7 +117,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
- nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ nr_sets = of_get_property(np, propname, NULL);
if (nr_sets)
this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
@@ -175,41 +136,77 @@ static void cache_associativity(struct cacheinfo *this_leaf)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
-static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+static bool cache_node_is_unified(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
- return of_property_read_bool(this_leaf->of_node, "cache-unified");
+ return of_property_read_bool(np, "cache-unified");
}
-static void cache_of_override_properties(unsigned int cpu)
+static void cache_of_set_props(struct cacheinfo *this_leaf,
+ struct device_node *np)
{
- int index;
+ /*
+ * init_cache_level must setup the cache level correctly
+ * overriding the architecturally specified levels, so
+ * if type is NONE at this stage, it should be unified
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ cache_node_is_unified(this_leaf, np))
+ this_leaf->type = CACHE_TYPE_UNIFIED;
+ cache_size(this_leaf, np);
+ cache_get_line_size(this_leaf, np);
+ cache_nr_sets(this_leaf, np);
+ cache_associativity(this_leaf);
+}
+
+static int cache_setup_of_node(unsigned int cpu)
+{
+ struct device_node *np;
struct cacheinfo *this_leaf;
+ struct device *cpu_dev = get_cpu_device(cpu);
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int index = 0;
- for (index = 0; index < cache_leaves(cpu); index++) {
+ /* skip if fw_token is already populated */
+ if (this_cpu_ci->info_list->fw_token) {
+ return 0;
+ }
+
+ if (!cpu_dev) {
+ pr_err("No cpu device for CPU %d\n", cpu);
+ return -ENODEV;
+ }
+ np = cpu_dev->of_node;
+ if (!np) {
+ pr_err("Failed to find cpu%d device node\n", cpu);
+ return -ENOENT;
+ }
+
+ while (index < cache_leaves(cpu)) {
this_leaf = this_cpu_ci->info_list + index;
- /*
- * init_cache_level must setup the cache level correctly
- * overriding the architecturally specified levels, so
- * if type is NONE at this stage, it should be unified
- */
- if (this_leaf->type == CACHE_TYPE_NOCACHE &&
- cache_node_is_unified(this_leaf))
- this_leaf->type = CACHE_TYPE_UNIFIED;
- cache_size(this_leaf);
- cache_get_line_size(this_leaf);
- cache_nr_sets(this_leaf);
- cache_associativity(this_leaf);
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
+ if (!np)
+ break;
+ cache_of_set_props(this_leaf, np);
+ this_leaf->fw_token = np;
+ index++;
}
+
+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
+ return -ENOENT;
+
+ return 0;
}
#else
-static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
/*
- * For non-DT systems, assume unique level 1 cache, system-wide
+ * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
* shared caches for all other levels. This will be used only if
* arch specific code has not populated shared_cpu_map
*/
@@ -217,6 +214,11 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
}
#endif
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+ return -ENOTSUPP;
+}
+
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -230,8 +232,8 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (of_have_populated_dt())
ret = cache_setup_of_node(cpu);
else if (!acpi_disabled)
- /* No cache property/hierarchy support yet in ACPI */
- ret = -ENOTSUPP;
+ ret = cache_setup_acpi(cpu);
+
if (ret)
return ret;
@@ -282,16 +284,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
}
- of_node_put(this_leaf->of_node);
+ if (of_have_populated_dt())
+ of_node_put(this_leaf->fw_token);
}
}
-static void cache_override_properties(unsigned int cpu)
-{
- if (of_have_populated_dt())
- return cache_of_override_properties(cpu);
-}
-
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -325,12 +322,17 @@ static int detect_cache_attributes(unsigned int cpu)
if (per_cpu_cacheinfo(cpu) == NULL)
return -ENOMEM;
+ /*
+ * populate_cache_leaves() may completely setup the cache leaves and
+ * shared_cpu_map or it may leave it partially setup.
+ */
ret = populate_cache_leaves(cpu);
if (ret)
goto free_ci;
/*
- * For systems using DT for cache hierarchy, of_node and shared_cpu_map
- * will be set up here only if they are not populated already
+ * For systems using DT for cache hierarchy, fw_token
+ * and shared_cpu_map will be set up here only if they are
+ * not populated already
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
@@ -338,7 +340,6 @@ static int detect_cache_attributes(unsigned int cpu)
goto free_ci;
}
- cache_override_properties(cpu);
return 0;
free_ci:
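
cache_setup_acpi() above is a __weak stub so that firmware-specific code can override it. The sketch below only shows the contract such an override has to meet; the loop body is a placeholder, not the real ACPI table parser.

#include <linux/cacheinfo.h>
#include <linux/errno.h>

int cache_setup_acpi(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int i;

	if (!this_cpu_ci || !this_cpu_ci->info_list)
		return -ENOENT;

	for (i = 0; i < this_cpu_ci->num_leaves; i++) {
		struct cacheinfo *leaf = this_cpu_ci->info_list + i;

		/*
		 * Fill size, line size and number of sets from the firmware
		 * description, and record a per-cache handle in fw_token so
		 * leaves describing the same cache can be matched, the way
		 * the DT path stores the of_node pointer.
		 */
		leaf->fw_token = NULL;	/* placeholder for a table pointer */
	}

	return 0;
}
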
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b610816eb887..df3e1a44707a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -145,6 +145,26 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
}
/**
+ * device_pm_move_to_tail - Move set of devices to the end of device lists
+ * @dev: Device to move
+ *
+ * This is a device_reorder_to_tail() wrapper taking the requisite locks.
+ *
+ * It moves the @dev along with all of its children and all of its consumers
+ * to the ends of the device_kset and dpm_list, recursively.
+ */
+void device_pm_move_to_tail(struct device *dev)
+{
+ int idx;
+
+ idx = device_links_read_lock();
+ device_pm_lock();
+ device_reorder_to_tail(dev, NULL);
+ device_pm_unlock();
+ device_links_read_unlock(idx);
+}
+
+/**
* device_link_add - Create a link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
@@ -216,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
link->rpm_active = true;
}
pm_runtime_new_link(consumer);
+ /*
+ * If the link is being added by the consumer driver at probe
+ * time, balance the decrementation of the supplier's runtime PM
+ * usage counter after consumer probe in driver_probe_device().
+ */
+ if (consumer->links.status == DL_DEV_PROBING)
+ pm_runtime_get_noresume(supplier);
}
get_device(supplier);
link->supplier = supplier;
@@ -235,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
- * Balance the decrementation of the supplier's
- * runtime PM usage counter after consumer probe
- * in driver_probe_device().
+ * Some callers expect the link creation during
+ * consumer driver probe to resume the supplier
+ * even without DL_FLAG_RPM_ACTIVE.
*/
if (flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_get_sync(supplier);
+ pm_runtime_resume(supplier);
link->status = DL_STATE_CONSUMER_PROBE;
break;
@@ -1467,7 +1494,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
- return NULL;
+ return ERR_PTR(-ENOMEM);
dir->class = class;
kobject_init(&dir->kobj, &class_dir_ktype);
@@ -1477,7 +1504,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
if (retval < 0) {
kobject_put(&dir->kobj);
- return NULL;
+ return ERR_PTR(retval);
}
return &dir->kobj;
}
@@ -1784,6 +1811,10 @@ int device_add(struct device *dev)
parent = get_device(dev->parent);
kobj = get_device_parent(dev, parent);
+ if (IS_ERR(kobj)) {
+ error = PTR_ERR(kobj);
+ goto parent_error;
+ }
if (kobj)
dev->kobj.parent = kobj;
@@ -1882,6 +1913,7 @@ done:
kobject_del(&dev->kobj);
Error:
cleanup_glue_dir(dev, glue_dir);
+parent_error:
put_device(parent);
name_error:
kfree(dev->p);
@@ -2406,7 +2438,7 @@ static void device_create_release(struct device *dev)
kfree(dev);
}
-static struct device *
+static __printf(6, 0) struct device *
device_create_groups_vargs(struct class *class, struct device *parent,
dev_t devt, void *drvdata,
const struct attribute_group **groups,
@@ -2684,7 +2716,7 @@ static int device_move_class_links(struct device *dev,
/**
* device_move - moves a device to a new parent
* @dev: the pointer to the struct device to be moved
- * @new_parent: the new parent of the device (can by NULL)
+ * @new_parent: the new parent of the device (can be NULL)
* @dpm_order: how to reorder the dpm_list
*/
int device_move(struct device *dev, struct device *new_parent,
@@ -2701,6 +2733,11 @@ int device_move(struct device *dev, struct device *new_parent,
device_pm_lock();
new_parent = get_device(new_parent);
new_parent_kobj = get_device_parent(dev, new_parent);
+ if (IS_ERR(new_parent_kobj)) {
+ error = PTR_ERR(new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
__func__, new_parent ? dev_name(new_parent) : "<NULL>");
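
The device-link hunks above change what happens when a DL_FLAG_PM_RUNTIME link is created while the consumer is probing. A rough sketch of that consumer side follows; the supplier argument is a placeholder for however the driver actually finds its supplier device.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int example_consumer_probe(struct device *dev, struct device *supplier)
{
	struct device_link *link;

	/*
	 * Per the changes above, a DL_FLAG_PM_RUNTIME link added during
	 * consumer probe takes a pm_runtime_get_noresume() reference on the
	 * supplier (dropped later in driver_probe_device()) and resumes the
	 * supplier with pm_runtime_resume() instead of pm_runtime_get_sync().
	 */
	link = device_link_add(dev, supplier, DL_FLAG_PM_RUNTIME);
	if (!link)
		return -ENODEV;

	return 0;
}
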
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2da998baa75c..30cc9c877ebb 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -534,14 +534,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
NULL
};
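
cpu_show_spec_store_bypass() above is a __weak default reporting "Not affected"; architectures override it with their real mitigation state, which then becomes readable from /sys/devices/system/cpu/vulnerabilities/spec_store_bypass. A hedged sketch of such an override (the helper and strings are illustrative, not any architecture's exact output):

#include <linux/device.h>
#include <linux/kernel.h>

static bool example_arch_ssb_mitigated(void)
{
	return false;	/* placeholder: real code inspects CPU/firmware state */
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (example_arch_ssb_mitigated())
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled\n");

	return sprintf(buf, "Vulnerable\n");
}
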
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c9f54089429b..6ebcd65d64b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -122,9 +122,7 @@ static void deferred_probe_work_func(struct work_struct *work)
* the list is a good order for suspend but deferred
* probe makes that very unsafe.
*/
- device_pm_lock();
- device_pm_move_last(dev);
- device_pm_unlock();
+ device_pm_move_to_tail(dev);
dev_dbg(dev, "Retrying from deferred list\n");
if (initcall_debug && !initcalls_done)
@@ -436,14 +434,6 @@ re_probe:
goto probe_failed;
}
- /*
- * Ensure devices are listed in devices_kset in correct order
- * It's important to move Dev to the end of devices_kset before
- * calling .probe, because it could be recursive and parent Dev
- * should always go first
- */
- devices_kset_move_last(dev);
-
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
@@ -817,13 +807,13 @@ static int __driver_attach(struct device *dev, void *data)
return ret;
} /* ret > 0 means positive match */
- if (dev->parent) /* Needed for USB */
+ if (dev->parent && dev->bus->need_parent_lock)
device_lock(dev->parent);
device_lock(dev);
if (!dev->driver)
driver_probe_device(drv, dev);
device_unlock(dev);
- if (dev->parent)
+ if (dev->parent && dev->bus->need_parent_lock)
device_unlock(dev->parent);
return 0;
@@ -919,7 +909,7 @@ void device_release_driver_internal(struct device *dev,
struct device_driver *drv,
struct device *parent)
{
- if (parent)
+ if (parent && dev->bus->need_parent_lock)
device_lock(parent);
device_lock(dev);
@@ -927,7 +917,7 @@ void device_release_driver_internal(struct device *dev,
__device_release_driver(dev, parent);
device_unlock(dev);
- if (parent)
+ if (parent && dev->bus->need_parent_lock)
device_unlock(parent);
}
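
The dd.c hunk above makes the deferred-probe retry path use device_pm_move_to_tail(), which reorders both devices_kset and dpm_list, instead of the separate devices_kset_move_last() and device_pm_move_last() calls. For context, a minimal sketch of the driver side that triggers this path; the lookup helper is a placeholder for a missing clock, regulator, supplier, and so on.

#include <linux/errno.h>
#include <linux/platform_device.h>

static void *example_lookup_dependency(struct device *dev)
{
	return NULL;	/* placeholder: a resource that may not exist yet */
}

static int example_probe(struct platform_device *pdev)
{
	void *res = example_lookup_dependency(&pdev->dev);

	/*
	 * Returning -EPROBE_DEFER parks the device on the deferred list;
	 * when the probe is retried, the core now moves the device to the
	 * tail of both devices_kset and dpm_list in one place.
	 */
	if (!res)
		return -EPROBE_DEFER;

	return 0;
}
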
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 95b67281cd2a..f98a097e73f2 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -84,9 +84,14 @@ static struct devres_group * node_to_group(struct devres_node *node)
static __always_inline struct devres * alloc_dr(dr_release_t release,
size_t size, gfp_t gfp, int nid)
{
- size_t tot_size = sizeof(struct devres) + size;
+ size_t tot_size;
struct devres *dr;
+ /* We must catch any near-SIZE_MAX cases that could overflow. */
+ if (unlikely(check_add_overflow(sizeof(struct devres), size,
+ &tot_size)))
+ return NULL;
+
dr = kmalloc_node_track_caller(tot_size, gfp, nid);
if (unlikely(!dr))
return NULL;
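
The devres hunk above uses check_add_overflow() so that a size request near SIZE_MAX fails cleanly instead of wrapping into a tiny allocation. A self-contained illustration of the same pattern (example names, not kernel code):

#include <linux/overflow.h>
#include <linux/slab.h>

static void *example_alloc_with_header(size_t header, size_t payload, gfp_t gfp)
{
	size_t total;

	/*
	 * check_add_overflow() stores header + payload in total and returns
	 * true if the addition wrapped around, so absurd sizes are refused.
	 */
	if (check_add_overflow(header, payload, &total))
		return NULL;

	return kzalloc(total, gfp);
}
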
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
deleted file mode 100644
index 1e6396bb807b..000000000000
--- a/drivers/base/dma-coherent.c
+++ /dev/null
@@ -1,433 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Coherent per-device memory handling.
- * Borrowed from i386
- */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-struct dma_coherent_mem {
- void *virt_base;
- dma_addr_t device_base;
- unsigned long pfn_base;
- int size;
- int flags;
- unsigned long *bitmap;
- spinlock_t spinlock;
- bool use_dev_dma_pfn_offset;
-};
-
-static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
-
-static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
-{
- if (dev && dev->dma_mem)
- return dev->dma_mem;
- return NULL;
-}
-
-static inline dma_addr_t dma_get_device_base(struct device *dev,
- struct dma_coherent_mem * mem)
-{
- if (mem->use_dev_dma_pfn_offset)
- return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
- else
- return mem->device_base;
-}
-
-static int dma_init_coherent_memory(
- phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
- struct dma_coherent_mem **mem)
-{
- struct dma_coherent_mem *dma_mem = NULL;
- void __iomem *mem_base = NULL;
- int pages = size >> PAGE_SHIFT;
- int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
- int ret;
-
- if (!size) {
- ret = -EINVAL;
- goto out;
- }
-
- mem_base = memremap(phys_addr, size, MEMREMAP_WC);
- if (!mem_base) {
- ret = -EINVAL;
- goto out;
- }
- dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dma_mem) {
- ret = -ENOMEM;
- goto out;
- }
- dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dma_mem->bitmap) {
- ret = -ENOMEM;
- goto out;
- }
-
- dma_mem->virt_base = mem_base;
- dma_mem->device_base = device_addr;
- dma_mem->pfn_base = PFN_DOWN(phys_addr);
- dma_mem->size = pages;
- dma_mem->flags = flags;
- spin_lock_init(&dma_mem->spinlock);
-
- *mem = dma_mem;
- return 0;
-
-out:
- kfree(dma_mem);
- if (mem_base)
- memunmap(mem_base);
- return ret;
-}
-
-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
-{
- if (!mem)
- return;
-
- memunmap(mem->virt_base);
- kfree(mem->bitmap);
- kfree(mem);
-}
-
-static int dma_assign_coherent_memory(struct device *dev,
- struct dma_coherent_mem *mem)
-{
- if (!dev)
- return -ENODEV;