author	Gautham R. Shenoy <ego@linux.vnet.ibm.com>	2020-12-10 16:08:57 +0530
committer	Michael Ellerman <mpe@ellerman.id.au>	2020-12-11 00:10:25 +1100
commit	fbd2b672e91d276b9fa5a729e4a823ba29fa2692 (patch)
tree	7a3cdcc79b02c37da3b13584bac60bcb45ad809e /arch/powerpc/kernel
parent	1fdc1d6632ff3f6813a2f15b65586bde8fe0f0ba (diff)
powerpc/smp: Rename init_thread_group_l1_cache_map() to make it generic
init_thread_group_l1_cache_map() initializes the per-cpu cpumask
thread_group_l1_cache_map with the core-siblings which share L1 cache
with the CPU. Make this function generic to the cache-property (L1 or
L2) and update a suitable mask. This is a preparatory patch for the
next patch where we will introduce discovery of thread-groups that
share L2-cache.

No functional change.

Signed-off-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1607596739-32439-4-git-send-email-ego@linux.vnet.ibm.com
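As a rough standalone model of the sibling-mask walk this patch makes generic (illustration only, not kernel code: THREADS_PER_CORE, group_id and all data below are invented), the logic reduces to:

#include <stdio.h>

#define THREADS_PER_CORE 8

int main(void)
{
	/*
	 * Per-thread group ids for one core, as a parsed
	 * "ibm,thread-groups" property might describe them:
	 * even-numbered threads share one cache, odd-numbered
	 * threads the other.
	 */
	int group_id[THREADS_PER_CORE] = { 0, 1, 0, 1, 0, 1, 0, 1 };
	int cpu = 2;			/* CPU whose mask we build */
	int cpu_group_start = group_id[cpu];
	unsigned long mask = 0;
	int i;

	for (i = 0; i < THREADS_PER_CORE; i++) {
		/* mirrors: if (i_group_start == cpu_group_start) */
		if (group_id[i] == cpu_group_start)
			mask |= 1UL << i;	/* cpumask_set_cpu(i, *mask) */
	}

	/* prints 0x55: threads 0, 2, 4 and 6 share the cache with cpu 2 */
	printf("cpu %d cache-sibling mask: 0x%lx\n", cpu, mask);
	return 0;
}

The kernel version does the same walk with cpumask primitives, filling the per-cpu thread_group_l1_cache_map.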
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/smp.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index f3290d57fea6..9078b5b5d6e4 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -866,15 +866,18 @@ out:
 	return tg;
 }
 
-static int init_thread_group_l1_cache_map(int cpu)
+static int __init init_thread_group_cache_map(int cpu, int cache_property)
 
 {
 	int first_thread = cpu_first_thread_sibling(cpu);
 	int i, cpu_group_start = -1, err = 0;
 	struct thread_groups *tg = NULL;
+	cpumask_var_t *mask;
 
-	tg = get_thread_groups(cpu, THREAD_GROUP_SHARE_L1,
-			       &err);
+	if (cache_property != THREAD_GROUP_SHARE_L1)
+		return -EINVAL;
+
+	tg = get_thread_groups(cpu, cache_property, &err);
 	if (!tg)
 		return err;
 
@@ -885,8 +888,8 @@ static int init_thread_group_l1_cache_map(int cpu)
 		return -ENODATA;
 	}
 
-	zalloc_cpumask_var_node(&per_cpu(thread_group_l1_cache_map, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
+	mask = &per_cpu(thread_group_l1_cache_map, cpu);
+	zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
 
 	for (i = first_thread; i < first_thread + threads_per_core; i++) {
 		int i_group_start = get_cpu_thread_group_start(i, tg);
@@ -897,7 +900,7 @@ static int init_thread_group_l1_cache_map(int cpu)
 		}
 
 		if (i_group_start == cpu_group_start)
-			cpumask_set_cpu(i, per_cpu(thread_group_l1_cache_map, cpu));
+			cpumask_set_cpu(i, *mask);
 	}
 
 	return 0;
@@ -976,7 +979,7 @@ static int init_big_cores(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		int err = init_thread_group_l1_cache_map(cpu);
+		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
 
 		if (err)
 			return err;
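For context only: the follow-up patch this change prepares for is expected to call the now-generic helper again for the L2 property. A hedged sketch of such a call site, assuming the follow-up adds a THREAD_GROUP_SHARE_L2 constant (not part of this patch):

	for_each_possible_cpu(cpu) {
		int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);

		if (err)
			return err;

		/* assumed follow-up: same helper, L2 cache property */
		err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2);
		if (err)
			return err;
	}

Until that follow-up lands, the cache_property != THREAD_GROUP_SHARE_L1 guard added above would reject such a second call with -EINVAL.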