DECLARE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
DECLARE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
+DECLARE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
#ifdef CONFIG_SMP
extern bool has_big_cores;
extern bool thread_group_shares_l2;
+extern bool thread_group_shares_l3;
#define cpu_smt_mask cpu_smt_mask
#ifdef CONFIG_SCHED_SMT
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
#define thread_group_shares_l2 0
+#define thread_group_shares_l3 0
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
static inline const struct cpumask *cpu_sibling_mask(int cpu)
bool has_big_cores;
bool coregroup_enabled;
bool thread_group_shares_l2;
+bool thread_group_shares_l3;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
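+/*
+ * Value 2 of the "property" field in ibm,thread-groups describes threads
+ * that share the L2 cache; on P10 those threads also share the L3 cache,
+ * hence the _L2_L3 name.
+ */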
-#define THREAD_GROUP_SHARE_L2 2
+#define THREAD_GROUP_SHARE_L2_L3 2
struct thread_groups {
unsigned int property;
unsigned int nr_groups;
*/
DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
+/*
+ * On P10, thread_group_l3_cache_map for each CPU is equal to the
+ * thread_group_l2_cache_map
+ */
+DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
+
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
return tg;
}
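+
+/*
+ * update_mask_from_threadgroup - set the bit in @mask for every thread of
+ * @cpu's core whose thread group (per @tg) starts at @cpu_group_start,
+ * i.e. the threads that share the cache level described by @tg with @cpu.
+ * Returns 0 on success, -ENODATA if a thread's group start cannot be found.
+ */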
+static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg, int cpu, int cpu_group_start)
+{
+ int first_thread = cpu_first_thread_sibling(cpu);
+ int i;
+
+ zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
+
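+ /* Walk every thread of @cpu's core and pick those in the same group */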
+ for (i = first_thread; i < first_thread + threads_per_core; i++) {
+ int i_group_start = get_cpu_thread_group_start(i, tg);
+
+ if (unlikely(i_group_start == -1)) {
+ WARN_ON_ONCE(1);
+ return -ENODATA;
+ }
+
+ if (i_group_start == cpu_group_start)
+ cpumask_set_cpu(i, *mask);
+ }
+
+ return 0;
+}
+
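+/*
+ * Build the thread_group_{l1,l2,l3}_cache_map for @cpu from the
+ * ibm,thread-groups list selected by @cache_property.
+ */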
static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
- int first_thread = cpu_first_thread_sibling(cpu);
- int i, cpu_group_start = -1, err = 0;
+ int cpu_group_start = -1, err = 0;
struct thread_groups *tg = NULL;
cpumask_var_t *mask = NULL;
if (cache_property != THREAD_GROUP_SHARE_L1 &&
- cache_property != THREAD_GROUP_SHARE_L2)
+ cache_property != THREAD_GROUP_SHARE_L2_L3)
return -EINVAL;
tg = get_thread_groups(cpu, cache_property, &err);
+
if (!tg)
return err;
return -ENODATA;
}
- if (cache_property == THREAD_GROUP_SHARE_L1)
+ if (cache_property == THREAD_GROUP_SHARE_L1) {
mask = &per_cpu(thread_group_l1_cache_map, cpu);
- else if (cache_property == THREAD_GROUP_SHARE_L2)
+ err = update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
+ } else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
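+ /* On P10, threads that share L2 also share L3: fill both maps from the same thread group */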
mask = &per_cpu(thread_group_l2_cache_map, cpu);
-
- zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
-
- for (i = first_thread; i < first_thread + threads_per_core; i++) {
- int i_group_start = get_cpu_thread_group_start(i, tg);
-
- if (unlikely(i_group_start == -1)) {
- WARN_ON_ONCE(1);
- return -ENODATA;
- }
-
- if (i_group_start == cpu_group_start)
- cpumask_set_cpu(i, *mask);
+ err = update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
+ if (err)
+ return err;
+ mask = &per_cpu(thread_group_l3_cache_map, cpu);
+ err = update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
}
+
- return 0;
+ return err;
}
has_big_cores = true;
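+ /* Now build the L2/L3 cache sibling maps for each CPU from ibm,thread-groups */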
for_each_possible_cpu(cpu) {
- int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2);
+ int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
if (err)
return err;
}
thread_group_shares_l2 = true;
- pr_debug("L2 cache only shared by the threads in the small core\n");
+ thread_group_shares_l3 = true;
+ pr_debug("L2/L3 cache only shared by the threads in the small core\n");
+
return 0;
}