fconf: Extract topology node properties from HW_CONFIG dtb
author     Madhukar Pappireddy <madhukar.pappireddy@arm.com>
           Fri, 27 Dec 2019 18:02:34 +0000 (12:02 -0600)
committer  Madhukar Pappireddy <madhukar.pappireddy@arm.com>
           Wed, 11 Mar 2020 16:25:10 +0000 (11:25 -0500)
Create, register (and implicitly invoke) the fconf_populate_topology()
function, which extracts the topology-related properties from the dtb
into the newly created fconf-based configuration structure
'soc_topology'. The appropriate libfdt APIs are added to the jmptbl.i
file for use with the USE_ROMLIB build feature.
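
For illustration only (not part of this patch), platform code could
later consume the populated 'soc_topology' structure through the
generic FCONF_GET_PROPERTY() macro from lib/fconf, which the new
hw_config__topology_getter() maps onto soc_topology. A minimal sketch,
with a hypothetical helper name:

  #include <fconf_hw_config_getter.h>
  #include <lib/fconf/fconf.h>

  static unsigned int fvp_get_cluster_count(void)
  {
          /* Expands to soc_topology.plat_cluster_count */
          return FCONF_GET_PROPERTY(hw_config, topology, plat_cluster_count);
  }

fvp_get_cluster_count() is not defined by this patch; it only shows how
the getter macro would be used once the populator has run.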

A new property, max-pwr-lvl, which describes the maximum power domain
level, is added to the HW_CONFIG device tree source files.

This patch also fixes a minor bug in the common device tree file
fvp-base-gicv3-psci-dynamiq-common.dtsi. As this file includes
fvp-base-gicv3-psci-common.dtsi, it is necessary to delete all
previously defined cluster nodes because DynamIQ based models have up
to 8 CPUs in each cluster. If they are not deleted, the final dts would
have an inaccurate description of the SoC topology, i.e., cluster0 with
8 or more core nodes and cluster1 with 4 core nodes.

Change-Id: I9eb406da3ba4732008a66c01afec7c9fa8ef59bf
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
fdts/fvp-base-gicv2-psci-aarch32.dts
fdts/fvp-base-gicv2-psci.dts
fdts/fvp-base-gicv3-psci-aarch32-common.dtsi
fdts/fvp-base-gicv3-psci-common.dtsi
fdts/fvp-base-gicv3-psci-dynamiq-common.dtsi
fdts/fvp-foundation-gicv2-psci.dts
fdts/fvp-foundation-gicv3-psci.dts
plat/arm/board/fvp/fconf/fconf_hw_config_getter.c
plat/arm/board/fvp/include/fconf_hw_config_getter.h
plat/arm/board/fvp/jmptbl.i

index e71a39519d07611ba9cf713a143b2ee001a6bfb1..fcef927b367c092193ec079d1ca08de9e7497a1a 100644 (file)
@@ -35,6 +35,7 @@
                cpu_on = <0x84000003>;
                sys_poweroff = <0x84000008>;
                sys_reset = <0x84000009>;
+               max-pwr-lvl = <2>;
        };
 
        cpus {
index c9c9d959474374b92deb50197be4d9bc1d0d4db6..1e0a81c3c9e818ef82fdf604bb42053f31825b98 100644 (file)
@@ -35,6 +35,7 @@
                cpu_on = <0xc4000003>;
                sys_poweroff = <0x84000008>;
                sys_reset = <0x84000009>;
+               max-pwr-lvl = <2>;
        };
 
        cpus {
index f9809db8bdf9dcc44c2885a6f20cd19a59d45e6c..a28a4a537292cdd1c98ff9ed051527c44d00014d 100644 (file)
@@ -33,6 +33,7 @@
                cpu_on = <0x84000003>;
                sys_poweroff = <0x84000008>;
                sys_reset = <0x84000009>;
+               max-pwr-lvl = <2>;
        };
 
        cpus {
index 5b0470d895bb443d978cb4d866427438fbca33a9..fb73f60532265232a6c0414a29417aa006eb0eb6 100644 (file)
@@ -33,6 +33,7 @@
                cpu_on = <0xc4000003>;
                sys_poweroff = <0x84000008>;
                sys_reset = <0x84000009>;
+               max-pwr-lvl = <2>;
        };
 
        cpus {
index f3f768417f9fbaf85a2e176f007a717a662639a8..4bed36ff4cc6dc8c0934db63ca5cd610ab45de81 100644 (file)
@@ -11,6 +11,9 @@
 /* DynamIQ based designs have up to 8 CPUs in each cluster */
 
 &CPU_MAP {
+       /delete-node/ cluster0;
+       /delete-node/ cluster1;
+
        cluster0 {
                core0 {
                        cpu = <&CPU0>;
index b6da905492e036bd6b50c61aa3c1b1a1fc5ab172..3a204cb24755f8df5d0fcf28635edfdaa64e6774 100644 (file)
@@ -35,6 +35,7 @@
                cpu_on = <0xc4000003>;
                sys_poweroff = <0x84000008>;
                sys_reset = <0x84000009>;
+               max-pwr-lvl = <2>;
        };
 
        cpus {
index 81071e255b0ed02c7204755da2dfaa714e1925be..d85305afe3bb9c95b4c5fccb0fe7b53a874fedf0 100644 (file)
@@ -35,6 +35,7 @@
                cpu_on = <0xc4000003>;
                sys_poweroff = <0x84000008>;
                sys_reset = <0x84000009>;
+               max-pwr-lvl = <2>;
        };
 
        cpus {
index 1a1d0566f3d5783e9f63479dd1907720c3e0988c..2952cde80d2298f31bf13b8f06ced1adfbba8e27 100644 (file)
@@ -12,6 +12,7 @@
 #include <plat/common/platform.h>
 
 struct gicv3_config_t gicv3_config;
+struct hw_topology_t soc_topology;
 
 int fconf_populate_gicv3_config(uintptr_t config)
 {
@@ -48,5 +49,120 @@ int fconf_populate_gicv3_config(uintptr_t config)
        return err;
 }
 
+int fconf_populate_topology(uintptr_t config)
+{
+       int err, node, cluster_node, core_node, thread_node, max_pwr_lvl = 0;
+       uint32_t cluster_count = 0, max_cpu_per_cluster = 0, total_cpu_count = 0;
+
+       /* Necessary to work with libfdt APIs */
+       const void *hw_config_dtb = (const void *)config;
+
+       /* Find the offset of the node containing "arm,psci-1.0" compatible property */
+       node = fdt_node_offset_by_compatible(hw_config_dtb, -1, "arm,psci-1.0");
+       if (node < 0) {
+               ERROR("FCONF: Unable to locate node with arm,psci-1.0 compatible property\n");
+               return node;
+       }
+
+       err = fdtw_read_cells(hw_config_dtb, node, "max-pwr-lvl", 1, &max_pwr_lvl);
+       if (err < 0) {
+               /*
+                * Some legacy FVP dts may not have this property. Assign the default
+                * value.
+                */
+               WARN("FCONF: Could not locate max-pwr-lvl property\n");
+               max_pwr_lvl = 2;
+       }
+
+       assert((uint32_t)max_pwr_lvl <= MPIDR_AFFLVL2);
+
+       /* Find the offset of the "cpus" node */
+       node = fdt_path_offset(hw_config_dtb, "/cpus");
+       if (node < 0) {
+               ERROR("FCONF: Node '%s' not found in hardware configuration dtb\n", "cpus");
+               return node;
+       }
+
+       /* A typical cpu-map node in a device tree is shown here for reference
+       cpu-map {
+               cluster0 {
+                       core0 {
+                               cpu = <&CPU0>;
+                       };
+                       core1 {
+                               cpu = <&CPU1>;
+                       };
+               };
+
+               cluster1 {
+                       core0 {
+                               cpu = <&CPU2>;
+                       };
+                       core1 {
+                               cpu = <&CPU3>;
+                       };
+               };
+       };
+       */
+
+       /* Locate the cpu-map child node */
+       node = fdt_subnode_offset(hw_config_dtb, node, "cpu-map");
+       if (node < 0) {
+               ERROR("FCONF: Node '%s' not found in hardware configuration dtb\n", "cpu-map");
+               return node;
+       }
+
+       uint32_t cpus_per_cluster[PLAT_ARM_CLUSTER_COUNT] = {0};
+
+       /* Iterate through cluster nodes */
+       fdt_for_each_subnode(cluster_node, hw_config_dtb, node) {
+               assert(cluster_count < PLAT_ARM_CLUSTER_COUNT);
+
+               /* Iterate through core nodes */
+               fdt_for_each_subnode(core_node, hw_config_dtb, cluster_node) {
+                       /* core nodes may have child nodes i.e., "thread" nodes */
+                       if (fdt_first_subnode(hw_config_dtb, core_node) < 0) {
+                               cpus_per_cluster[cluster_count]++;
+                       } else {
+                               /* Multi-threaded CPU description is found in dtb */
+                               fdt_for_each_subnode(thread_node, hw_config_dtb, core_node) {
+                                       cpus_per_cluster[cluster_count]++;
+                               }
+
+                               /*
+                                * Some dtbs may have core nodes without thread
+                                * nodes, so there is no need to report an error
+                                * if no child node is found here.
+                                */
+                       }
+               }
+
+               /* Ensure every cluster node has at least 1 child node */
+               if (cpus_per_cluster[cluster_count] < 1U) {
+                       ERROR("FCONF: Unable to locate the core node in cluster %d\n", cluster_count);
+                       return -1;
+               }
+
+               INFO("CLUSTER ID: %d cpu-count: %d\n", cluster_count, cpus_per_cluster[cluster_count]);
+
+               /* Find the maximum number of cpus in any cluster */
+               max_cpu_per_cluster = MAX(max_cpu_per_cluster, cpus_per_cluster[cluster_count]);
+               total_cpu_count += cpus_per_cluster[cluster_count];
+               cluster_count++;
+       }
+
+
+       /* At least one cluster node is expected in hardware configuration dtb */
+       if (cluster_count < 1U) {
+               ERROR("FCONF: Unable to locate the cluster node in cpu-map node\n");
+               return -1;
+       }
+
+       soc_topology.plat_max_pwr_level = (uint32_t)max_pwr_lvl;
+       soc_topology.plat_cluster_count = cluster_count;
+       soc_topology.cluster_cpu_count = max_cpu_per_cluster;
+       soc_topology.plat_cpu_count = total_cpu_count;
+
+       return 0;
+}
 
 FCONF_REGISTER_POPULATOR(HW_CONFIG, gicv3_config, fconf_populate_gicv3_config);
+FCONF_REGISTER_POPULATOR(HW_CONFIG, topology, fconf_populate_topology);
index cc1576e18729048aa80eca2be4b7e3f40b90b28e..cab832f683985a9575f441179b194a978d4a08cd 100644 (file)
 /* Hardware Config related getter */
 #define hw_config__gicv3_config_getter(prop) gicv3_config.prop
 
+#define hw_config__topology_getter(prop) soc_topology.prop
+
 struct gicv3_config_t {
        void *gicd_base;
        void *gicr_base;
 };
 
+struct hw_topology_t {
+       uint32_t plat_cluster_count;
+       uint32_t cluster_cpu_count;
+       uint32_t plat_cpu_count;
+       uint32_t plat_max_pwr_level;
+};
+
 int fconf_populate_gicv3_config(uintptr_t config);
+int fconf_populate_topology(uintptr_t config);
 
 extern struct gicv3_config_t gicv3_config;
+extern struct hw_topology_t soc_topology;
 
 #endif /* FCONF_HW_CONFIG_GETTER_H */
index b1b9ed463e05d0adbc359861980182f490eead0f..0c93d0aab8f2e815417510303f73f883b0d5d45f 100644 (file)
@@ -22,6 +22,8 @@ fdt     fdt_node_offset_by_compatible
 fdt     fdt_setprop_inplace_namelen_partial
 fdt     fdt_first_subnode
 fdt     fdt_next_subnode
+fdt     fdt_path_offset
+fdt     fdt_subnode_offset
 mbedtls mbedtls_asn1_get_alg
 mbedtls mbedtls_asn1_get_alg_null
 mbedtls mbedtls_asn1_get_bitstring_null