cacheinfo: Allow early detection and population of cache attributes
author     Sudeep Holla <sudeep.holla@arm.com>
           Mon, 4 Jul 2022 10:15:50 +0000 (11:15 +0100)
committer  Sudeep Holla <sudeep.holla@arm.com>
           Mon, 4 Jul 2022 15:22:28 +0000 (16:22 +0100)
Some architectures/platforms may need to set up cache properties very
early during boot, along with other CPU topology information, so that
all of this information can be used to build the sched_domains consumed
by the scheduler.

Allow detect_cache_attributes() to be called quite early during boot.
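
For illustration, a hedged sketch of how an architecture's early boot
path might now use this export; the function name early_cache_init()
and its placement are hypothetical, as the actual call site is left to
architecture/platform code:

    #include <linux/cacheinfo.h>
    #include <linux/cpumask.h>
    #include <linux/init.h>
    #include <linux/printk.h>

    /*
     * Hypothetical early-boot caller, for illustration only. Runs after
     * slab is up (detect_cache_attributes() allocates with GFP_KERNEL)
     * but before secondary CPUs are brought online.
     */
    static void __init early_cache_init(void)
    {
            unsigned int cpu;

            for_each_possible_cpu(cpu) {
                    /*
                     * Failure here is non-fatal: the existing hotplug
                     * path (cacheinfo_cpu_online) retries detection when
                     * the CPU actually comes online.
                     */
                    if (detect_cache_attributes(cpu))
                            pr_debug("early cacheinfo detection failed for cpu%u\n",
                                     cpu);
            }
    }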

Link: https://lore.kernel.org/r/20220704101605.1318280-7-sudeep.holla@arm.com
Tested-by: Ionela Voinescu <ionela.voinescu@arm.com>
Tested-by: Conor Dooley <conor.dooley@microchip.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
drivers/base/cacheinfo.c
include/linux/cacheinfo.h

index fdc1baa342f1c67efe3edb3a58010ef5c8047a28..4d21a1022fa9214da0023f897dde8b4aff32097d 100644 (file)
@@ -193,14 +193,8 @@ static int cache_setup_of_node(unsigned int cpu)
 {
        struct device_node *np;
        struct cacheinfo *this_leaf;
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int index = 0;
 
-       /* skip if fw_token is already populated */
-       if (this_cpu_ci->info_list->fw_token) {
-               return 0;
-       }
-
        np = of_cpu_device_node_get(cpu);
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
@@ -236,6 +230,18 @@ int __weak cache_setup_acpi(unsigned int cpu)
 
 unsigned int coherency_max_size;
 
+static int cache_setup_properties(unsigned int cpu)
+{
+       int ret = 0;
+
+       if (of_have_populated_dt())
+               ret = cache_setup_of_node(cpu);
+       else if (!acpi_disabled)
+               ret = cache_setup_acpi(cpu);
+
+       return ret;
+}
+
 static int cache_shared_cpu_map_setup(unsigned int cpu)
 {
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
@@ -246,21 +252,21 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
        if (this_cpu_ci->cpu_map_populated)
                return 0;
 
-       if (of_have_populated_dt())
-               ret = cache_setup_of_node(cpu);
-       else if (!acpi_disabled)
-               ret = cache_setup_acpi(cpu);
-
-       if (ret)
-               return ret;
+       /*
+        * Skip setting up cache properties if LLC is valid; only the
+        * shared cpu_map needs updating when the cache attributes were
+        * populated early, before all the CPUs were brought online.
+        */
+       if (!last_level_cache_is_valid(cpu)) {
+               ret = cache_setup_properties(cpu);
+               if (ret)
+                       return ret;
+       }
 
        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;
 
                this_leaf = per_cpu_cacheinfo_idx(cpu, index);
-               /* skip if shared_cpu_map is already populated */
-               if (!cpumask_empty(&this_leaf->shared_cpu_map))
-                       continue;
 
                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
@@ -330,17 +336,28 @@ int __weak populate_cache_leaves(unsigned int cpu)
        return -ENOENT;
 }
 
-static int detect_cache_attributes(unsigned int cpu)
+int detect_cache_attributes(unsigned int cpu)
 {
        int ret;
 
+       /* Since early detection of the cacheinfo is allowed via this
+        * function and this also gets called as a CPU hotplug callback via
+        * cacheinfo_cpu_online, the initialisation can be skipped and only
+        * the CPU maps need to be updated, as the CPU online status is
+        * already updated when called via the cacheinfo_cpu_online path.
+        */
+       if (per_cpu_cacheinfo(cpu))
+               goto update_cpu_map;
+
        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;
 
        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_KERNEL);
-       if (per_cpu_cacheinfo(cpu) == NULL)
+       if (per_cpu_cacheinfo(cpu) == NULL) {
+               cache_leaves(cpu) = 0;
                return -ENOMEM;
+       }
 
        /*
         * populate_cache_leaves() may completely setup the cache leaves and
@@ -349,6 +366,8 @@ static int detect_cache_attributes(unsigned int cpu)
        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
+
+update_cpu_map:
        /*
         * For systems using DT for cache hierarchy, fw_token
         * and shared_cpu_map will be set up here only if they are
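
For readability, here is a hedged reconstruction of how
detect_cache_attributes() should read after the hunk above. The diff
context is truncated, so the tail of the function (the
cache_shared_cpu_map_setup() call and the free_ci error path) is an
assumption based on the surrounding file, not part of the patch shown:

    int detect_cache_attributes(unsigned int cpu)
    {
            int ret;

            /* Early path already allocated and populated the leaves. */
            if (per_cpu_cacheinfo(cpu))
                    goto update_cpu_map;

            if (init_cache_level(cpu) || !cache_leaves(cpu))
                    return -ENOENT;

            per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                             sizeof(struct cacheinfo), GFP_KERNEL);
            if (per_cpu_cacheinfo(cpu) == NULL) {
                    cache_leaves(cpu) = 0; /* keep state consistent on failure */
                    return -ENOMEM;
            }

            ret = populate_cache_leaves(cpu);
            if (ret)
                    goto free_ci;

    update_cpu_map:
            /*
             * Assumed continuation (elided diff context): set up fw_token
             * and shared_cpu_map, freeing everything on failure.
             */
            ret = cache_shared_cpu_map_setup(cpu);
            if (ret) {
                    pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
                    goto free_ci;
            }

            return 0;

    free_ci:
            free_cache_attributes(cpu);
            return ret;
    }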
index 7e429bc5c1a43a0200622611d6dd073d20d4b36b..00b7a6ae8617588c5a26f6d5c2275de4b8699f25 100644 (file)
@@ -84,6 +84,7 @@ int populate_cache_leaves(unsigned int cpu);
 int cache_setup_acpi(unsigned int cpu);
 bool last_level_cache_is_valid(unsigned int cpu);
 bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int detect_cache_attributes(unsigned int cpu);
 #ifndef CONFIG_ACPI_PPTT
 /*
  * acpi_find_last_cache_level is only called on ACPI enabled
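
With detect_cache_attributes() now declared in the header, it can be
called twice for the same CPU without redoing the initialisation. A
hedged sketch of the resulting behaviour; the wrapper function below is
purely illustrative:

    /* Illustrative only: shows how the two call paths interact. */
    static void example_two_phase(unsigned int cpu)
    {
            /*
             * First call, e.g. from early boot topology code: allocates
             * per_cpu_cacheinfo(cpu), populates the leaves, and sets up
             * the shared cpu_map for the CPUs online at that point.
             */
            detect_cache_attributes(cpu);

            /*
             * Second call, e.g. from the cacheinfo_cpu_online() hotplug
             * callback: per_cpu_cacheinfo(cpu) is already allocated, so
             * the function jumps straight to update_cpu_map and only
             * refreshes the shared cpu_map, now including the newly
             * onlined CPUs.
             */
            detect_cache_attributes(cpu);
    }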