commit 3fcbf1c77d08 ("arch_topology: Fix cache attributes detection in the
CPU hotplug path") adds a call to detect_cache_attributes() to populate the
cacheinfo before updating the siblings mask. detect_cache_attributes()
allocates memory and can take the PPTT mutex (on ACPI platforms). On
PREEMPT_RT kernels, on secondary CPUs, this triggers a
'BUG: sleeping function called from invalid context' splat, as the code is
executed with preemption and interrupts disabled:
| BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
| in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 0, name: swapper/111
| preempt_count: 1, expected: 0
| RCU nest depth: 1, expected: 1
| 3 locks held by swapper/111/0:
|  #0: (&pcp->lock){+.+.}-{3:3}, at: get_page_from_freelist+0x218/0x12c8
|  #1: (rcu_read_lock){....}-{1:3}, at: rt_spin_trylock+0x48/0xf0
|  #2: (&zone->lock){+.+.}-{3:3}, at: rmqueue_bulk+0x64/0xa80
| irq event stamp: 0
| hardirqs last enabled at (0): 0x0
| hardirqs last disabled at (0): copy_process+0x5dc/0x1ab8
| softirqs last enabled at (0): copy_process+0x5dc/0x1ab8
| softirqs last disabled at (0): 0x0
| Preemption disabled at:
| migrate_enable+0x30/0x130
| CPU: 111 PID: 0 Comm: swapper/111 Tainted: G W 6.0.0-rc4-rt6-[...]
| Call trace:
|  __kmalloc+0xbc/0x1e8
|  detect_cache_attributes+0x2d4/0x5f0
|  update_siblings_masks+0x30/0x368
|  store_cpu_topology+0x78/0xb8
|  secondary_start_kernel+0xd0/0x198
|  __secondary_switched+0xb0/0xb4
Pierre fixed this issue upstream in 6.3; the original series is here:
https://lore.kernel.org/all/167404285593.885445.6219705651301997538.b4-ty@ar...
We also encountered the same issue on the 6.1 stable branch and need to
backport this series:

  cacheinfo: Use RISC-V's init_cache_level() as generic OF implementation
  cacheinfo: Return error code in init_of_cache_level()
  cacheinfo: Check 'cache-unified' property to count cache leaves
  ACPI: PPTT: Remove acpi_find_cache_levels()
  ACPI: PPTT: Update acpi_find_last_cache_level() to acpi_get_cache_info()
  arch_topology: Build cacheinfo from primary CPU
There was also a non-trivial number of follow-on fixes for patches in this
series, as pointed out by Greg in the 6.1.156-RC1 review:

  cacheinfo: Initialize variables in fetch_cache_info()
  cacheinfo: Fix LLC is not exported through sysfs
  drivers: base: cacheinfo: Update cpu_map_populated during CPU Hotplug
Finally, Jon discovered an issue on the Tegra platform caused by these
patches:
https://lore.kernel.org/all/046f08cb-0610-48c9-af24-4804367df177@nvidia.com/
So we also need to backport the following patch:

  arm64: tegra: Update cache properties
K Prateek Nayak (1):
  drivers: base: cacheinfo: Update cpu_map_populated during CPU Hotplug
Pierre Gondois (8):
  cacheinfo: Use RISC-V's init_cache_level() as generic OF implementation
  cacheinfo: Return error code in init_of_cache_level()
  cacheinfo: Check 'cache-unified' property to count cache leaves
  ACPI: PPTT: Remove acpi_find_cache_levels()
  ACPI: PPTT: Update acpi_find_last_cache_level() to acpi_get_cache_info()
  arch_topology: Build cacheinfo from primary CPU
  cacheinfo: Initialize variables in fetch_cache_info()
  arm64: tegra: Update cache properties
Yicong Yang (1):
  cacheinfo: Fix LLC is not exported through sysfs
 arch/arm64/boot/dts/nvidia/tegra194.dtsi |  15 +++
 arch/arm64/boot/dts/nvidia/tegra210.dtsi |   1 +
 arch/arm64/boot/dts/nvidia/tegra234.dtsi |  33 +++++
 arch/arm64/kernel/cacheinfo.c            |  11 +-
 arch/riscv/kernel/cacheinfo.c            |  42 ------
 drivers/acpi/pptt.c                      |  93 ++++++------
 drivers/base/arch_topology.c             |  12 +-
 drivers/base/cacheinfo.c                 | 156 +++++++++++++++++----
 include/linux/cacheinfo.h                |  11 +-
 9 files changed, 262 insertions(+), 112 deletions(-)
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit c3719bd9eeb2edf84bd263d662e36ca0ba262a23 ]
RISC-V's implementation of init_cache_level() follows the Devicetree
Specification v0.3 regarding caches, cf.:
- s3.7.3 'Internal (L1) Cache Properties'
- s3.8 'Multi-level and Shared Cache Nodes'
Allow reusing the implementation by moving it.
Also make 'levels', 'leaves' and 'level' unsigned int.
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230104183033.755668-2-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 arch/riscv/kernel/cacheinfo.c | 39 +------------------------------
 drivers/base/cacheinfo.c      | 44 +++++++++++++++++++++++++++++++++++
 include/linux/cacheinfo.h     |  1 +
 3 files changed, 46 insertions(+), 38 deletions(-)
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 90deabfe63ea..440a3df5944c 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -115,44 +115,7 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf,
int init_cache_level(unsigned int cpu) { - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct device_node *np = of_cpu_device_node_get(cpu); - struct device_node *prev = NULL; - int levels = 0, leaves = 0, level; - - if (of_property_read_bool(np, "cache-size")) - ++leaves; - if (of_property_read_bool(np, "i-cache-size")) - ++leaves; - if (of_property_read_bool(np, "d-cache-size")) - ++leaves; - if (leaves > 0) - levels = 1; - - prev = np; - while ((np = of_find_next_cache_node(np))) { - of_node_put(prev); - prev = np; - if (!of_device_is_compatible(np, "cache")) - break; - if (of_property_read_u32(np, "cache-level", &level)) - break; - if (level <= levels) - break; - if (of_property_read_bool(np, "cache-size")) - ++leaves; - if (of_property_read_bool(np, "i-cache-size")) - ++leaves; - if (of_property_read_bool(np, "d-cache-size")) - ++leaves; - levels = level; - } - - of_node_put(np); - this_cpu_ci->num_levels = levels; - this_cpu_ci->num_leaves = leaves; - - return 0; + return init_of_cache_level(cpu); }
int populate_cache_leaves(unsigned int cpu) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 26e13887aba4..7663eaddd168 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -223,8 +223,52 @@ static int cache_setup_of_node(unsigned int cpu)
return 0; } + +int init_of_cache_level(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct device_node *np = of_cpu_device_node_get(cpu); + struct device_node *prev = NULL; + unsigned int levels = 0, leaves = 0, level; + + if (of_property_read_bool(np, "cache-size")) + ++leaves; + if (of_property_read_bool(np, "i-cache-size")) + ++leaves; + if (of_property_read_bool(np, "d-cache-size")) + ++leaves; + if (leaves > 0) + levels = 1; + + prev = np; + while ((np = of_find_next_cache_node(np))) { + of_node_put(prev); + prev = np; + if (!of_device_is_compatible(np, "cache")) + break; + if (of_property_read_u32(np, "cache-level", &level)) + break; + if (level <= levels) + break; + if (of_property_read_bool(np, "cache-size")) + ++leaves; + if (of_property_read_bool(np, "i-cache-size")) + ++leaves; + if (of_property_read_bool(np, "d-cache-size")) + ++leaves; + levels = level; + } + + of_node_put(np); + this_cpu_ci->num_levels = levels; + this_cpu_ci->num_leaves = leaves; + + return 0; +} + #else static inline int cache_setup_of_node(unsigned int cpu) { return 0; } +int init_of_cache_level(unsigned int cpu) { return 0; } #endif
int __weak cache_setup_acpi(unsigned int cpu) diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 00b7a6ae8617..ff0328f3fbb0 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -80,6 +80,7 @@ struct cpu_cacheinfo {
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); int init_cache_level(unsigned int cpu); +int init_of_cache_level(unsigned int cpu); int populate_cache_leaves(unsigned int cpu); int cache_setup_acpi(unsigned int cpu); bool last_level_cache_is_valid(unsigned int cpu);
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit 8844c3df001bc1d8397fddea341308da63855d53 ]
Make init_of_cache_level() return an error code when the cache information
parsing fails, to help detect missing information.
init_of_cache_level() is only called for riscv. Returning an error code
instead of 0 prevents detect_cache_attributes() from allocating memory when
an incomplete DT is parsed.
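For illustration, below is a rough standalone model of what the new return
value buys the caller. It is not the kernel code: the
init_cache_level()/init_of_cache_level() chain is collapsed into one call,
the cacheinfo array is a plain calloc(), and the error values simply come
from <errno.h>.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of init_of_cache_level(): report -EINVAL on a malformed cache
 * description instead of silently claiming zero levels/leaves. */
static int model_init_of_cache_level(int dt_complete, unsigned int *leaves)
{
        if (!dt_complete)
                return -EINVAL;
        *leaves = 3;            /* e.g. L1I + L1D + unified L2 */
        return 0;
}

/* Model of the detect_cache_attributes() call site: with an error
 * return, no cacheinfo memory is allocated for an incomplete DT. */
static int model_detect_cache_attributes(int dt_complete)
{
        unsigned int leaves = 0;
        void *ci;

        if (model_init_of_cache_level(dt_complete, &leaves) || !leaves)
                return -ENOENT;

        ci = calloc(leaves, 64);        /* stands in for the cacheinfo array */
        if (!ci)
                return -ENOMEM;
        free(ci);
        return 0;
}

int main(void)
{
        printf("complete DT:   %d\n", model_detect_cache_attributes(1)); /* 0 */
        printf("incomplete DT: %d\n", model_detect_cache_attributes(0)); /* -ENOENT */
        return 0;
}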
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230104183033.755668-3-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 drivers/base/cacheinfo.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 7663eaddd168..480007210bcc 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -245,11 +245,11 @@ int init_of_cache_level(unsigned int cpu)
 		of_node_put(prev);
 		prev = np;
 		if (!of_device_is_compatible(np, "cache"))
-			break;
+			goto err_out;
 		if (of_property_read_u32(np, "cache-level", &level))
-			break;
+			goto err_out;
 		if (level <= levels)
-			break;
+			goto err_out;
 		if (of_property_read_bool(np, "cache-size"))
 			++leaves;
 		if (of_property_read_bool(np, "i-cache-size"))
@@ -264,6 +264,10 @@ int init_of_cache_level(unsigned int cpu)
 	this_cpu_ci->num_leaves = leaves;
 
 	return 0;
+
+err_out:
+	of_node_put(np);
+	return -EINVAL;
 }
#else
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit de0df442ee49cb1f6ee58f3fec5dcb5e5eb70aab ]
The DeviceTree Specification v0.3 specifies that the cache node
'[d-|i-|]cache-size' property is required. The 'cache-unified' property
specifies whether the cache level is separate or unified.

If the cache-size property is missing, no cache leaf is accounted for.
This can lead to a 'BUG: KASAN: slab-out-of-bounds' [1].

Check the 'cache-unified' property and always account for at least one
cache leaf when parsing the device tree.
[1] https://lore.kernel.org/all/0f19cb3f-d6cf-4032-66d2-dedc9d09a0e3@linaro.org/
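As a standalone illustration of the counting rule above (not the kernel
function itself; the struct and has_* fields below stand in for
of_property_read_bool() lookups on a real cache node):

#include <stdbool.h>
#include <stdio.h>

struct model_cache_node {
        bool has_cache_size;    /* 'cache-size' present */
        bool has_icache_size;   /* 'i-cache-size' present */
        bool has_dcache_size;   /* 'd-cache-size' present */
        bool cache_unified;     /* 'cache-unified' present */
};

/* Mirrors the intent of of_count_cache_leaves(): one leaf per size
 * property; if the (required) size properties are all missing, fall
 * back on 'cache-unified' and assume 1 unified or 2 split leaves. */
static unsigned int model_count_cache_leaves(const struct model_cache_node *np)
{
        unsigned int leaves = 0;

        if (np->has_cache_size)
                leaves++;
        if (np->has_icache_size)
                leaves++;
        if (np->has_dcache_size)
                leaves++;

        if (!leaves)
                return np->cache_unified ? 1 : 2;

        return leaves;
}

int main(void)
{
        struct model_cache_node split_l1 = {
                .has_icache_size = true, .has_dcache_size = true };
        struct model_cache_node bare_unified = { .cache_unified = true };
        struct model_cache_node bare_unknown = { 0 };

        printf("split L1:       %u\n", model_count_cache_leaves(&split_l1));     /* 2 */
        printf("bare unified:   %u\n", model_count_cache_leaves(&bare_unified)); /* 1 */
        printf("bare, no hints: %u\n", model_count_cache_leaves(&bare_unknown)); /* 2 */
        return 0;
}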
Reported-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Tested-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
Link: https://lore.kernel.org/r/20230104183033.755668-4-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 drivers/base/cacheinfo.c | 37 ++++++++++++++++++++++++++-----------
 1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 480007210bcc..ab99b0f0d010 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -224,12 +224,9 @@ static int cache_setup_of_node(unsigned int cpu) return 0; }
-int init_of_cache_level(unsigned int cpu) +static int of_count_cache_leaves(struct device_node *np) { - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct device_node *np = of_cpu_device_node_get(cpu); - struct device_node *prev = NULL; - unsigned int levels = 0, leaves = 0, level; + unsigned int leaves = 0;
if (of_property_read_bool(np, "cache-size")) ++leaves; @@ -237,6 +234,28 @@ int init_of_cache_level(unsigned int cpu) ++leaves; if (of_property_read_bool(np, "d-cache-size")) ++leaves; + + if (!leaves) { + /* The '[i-|d-|]cache-size' property is required, but + * if absent, fallback on the 'cache-unified' property. + */ + if (of_property_read_bool(np, "cache-unified")) + return 1; + else + return 2; + } + + return leaves; +} + +int init_of_cache_level(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct device_node *np = of_cpu_device_node_get(cpu); + struct device_node *prev = NULL; + unsigned int levels = 0, leaves, level; + + leaves = of_count_cache_leaves(np); if (leaves > 0) levels = 1;
@@ -250,12 +269,8 @@ int init_of_cache_level(unsigned int cpu) goto err_out; if (level <= levels) goto err_out; - if (of_property_read_bool(np, "cache-size")) - ++leaves; - if (of_property_read_bool(np, "i-cache-size")) - ++leaves; - if (of_property_read_bool(np, "d-cache-size")) - ++leaves; + + leaves += of_count_cache_leaves(np); levels = level; }
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit fa4d566a605bc4cf32d69f16ef8cf9696635f75a ]
acpi_find_cache_levels() is used at a single place and is short enough to be merged into the calling function. The removal allows an easier renaming of the calling function in the next patch.
Also reorder the local variables in the 'reversed Christmas tree' order.
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Reviewed-by: Jeremy Linton <jeremy.linton@arm.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230104183033.755668-5-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 drivers/acpi/pptt.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 1938e4778725..01aae0f203b0 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -286,19 +286,6 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
 	return NULL;
 }
 
-static int acpi_find_cache_levels(struct acpi_table_header *table_hdr,
-				  u32 acpi_cpu_id)
-{
-	int number_of_levels = 0;
-	struct acpi_pptt_processor *cpu;
-
-	cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id);
-	if (cpu)
-		number_of_levels = acpi_count_levels(table_hdr, cpu);
-
-	return number_of_levels;
-}
-
 static u8 acpi_cache_type(enum cache_type type)
 {
 	switch (type) {
@@ -621,9 +608,10 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
  */
 int acpi_find_last_cache_level(unsigned int cpu)
 {
-	u32 acpi_cpu_id;
+	struct acpi_pptt_processor *cpu_node;
 	struct acpi_table_header *table;
 	int number_of_levels = 0;
+	u32 acpi_cpu_id;
 
 	table = acpi_get_pptt();
 	if (!table)
@@ -632,7 +620,10 @@ int acpi_find_last_cache_level(unsigned int cpu)
 	pr_debug("Cache Setup find last level CPU=%d\n", cpu);
 
 	acpi_cpu_id = get_acpi_id_for_cpu(cpu);
-	number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
+	cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+	if (cpu_node)
+		number_of_levels = acpi_count_levels(table, cpu_node);
+
 	pr_debug("Cache Setup find last level level=%d\n", number_of_levels);
 
 	return number_of_levels;
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit bd500361a937c03a3da57178287ce543c8f3681b ]
acpi_find_last_cache_level() finds the last level of cache for a given CPU.
The function is only called on arm64 ACPI-based platforms to check for cache
information that would be missing from the CLIDR_EL1 register. To allow
populating (struct cpu_cacheinfo).num_leaves by only parsing a PPTT, update
acpi_find_last_cache_level() to also get the 'split_levels', i.e. the number
of cache levels that are split into data/instruction caches.
It is assumed that there will not be data/instruction caches above a unified
cache. If a split level consists of one data cache and no instruction cache
(or the opposite), then the missing cache will still be populated by default
with minimal cache information and a maximal cpumask (all non-existing caches
have the same fw_token).
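As a concrete example of how the two numbers end up being consumed: a later
patch in this series ("arch_topology: Build cacheinfo from primary CPU")
derives the leaf count as levels + split_levels. The tiny standalone sketch
below only illustrates that arithmetic under the assumptions stated above;
the helper name is made up, not a kernel function.

#include <stdio.h>

/* Each split level contributes one extra leaf on top of the one leaf
 * per level, i.e. num_leaves = levels + split_levels. */
static unsigned int model_num_leaves(unsigned int levels, unsigned int split_levels)
{
        return levels + split_levels;
}

int main(void)
{
        /* Split L1 (I + D), unified L2, unified L3. */
        printf("levels=3, split_levels=1 -> %u leaves\n", model_num_leaves(3, 1)); /* 4 */
        /* Split L1 and L2, unified L3. */
        printf("levels=3, split_levels=2 -> %u leaves\n", model_num_leaves(3, 2)); /* 5 */
        return 0;
}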
Suggested-by: Jeremy Linton <jeremy.linton@arm.com>
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Reviewed-by: Jeremy Linton <jeremy.linton@arm.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230104183033.755668-6-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 arch/arm64/kernel/cacheinfo.c | 11 +++--
 drivers/acpi/pptt.c           | 76 +++++++++++++++++++++++------------
 include/linux/cacheinfo.h     |  9 +++--
 3 files changed, 63 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 1510f457b615..a565e8dc9c15 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -46,7 +46,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, int init_cache_level(unsigned int cpu) { unsigned int ctype, level, leaves; - int fw_level; + int fw_level, ret; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) { @@ -59,10 +59,13 @@ int init_cache_level(unsigned int cpu) leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; }
- if (acpi_disabled) + if (acpi_disabled) { fw_level = of_find_last_cache_level(cpu); - else - fw_level = acpi_find_last_cache_level(cpu); + } else { + ret = acpi_get_cache_info(cpu, &fw_level, NULL); + if (ret < 0) + return ret; + }
if (fw_level < 0) return fw_level; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 01aae0f203b0..54676e3d82dd 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -81,6 +81,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type) * acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache * @table_hdr: Pointer to the head of the PPTT table * @local_level: passed res reflects this cache level + * @split_levels: Number of split cache levels (data/instruction). * @res: cache resource in the PPTT we want to walk * @found: returns a pointer to the requested level if found * @level: the requested cache level @@ -100,6 +101,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type) */ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, unsigned int local_level, + unsigned int *split_levels, struct acpi_subtable_header *res, struct acpi_pptt_cache **found, unsigned int level, int type) @@ -113,8 +115,17 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, while (cache) { local_level++;
+ if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) { + cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache); + continue; + } + + if (split_levels && + (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) || + acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR))) + *split_levels = local_level; + if (local_level == level && - cache->flags & ACPI_PPTT_CACHE_TYPE_VALID && acpi_pptt_match_type(cache->attributes, type)) { if (*found != NULL && cache != *found) pr_warn("Found duplicate cache level/type unable to determine uniqueness\n"); @@ -135,8 +146,8 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr, static struct acpi_pptt_cache * acpi_find_cache_level(struct acpi_table_header *table_hdr, struct acpi_pptt_processor *cpu_node, - unsigned int *starting_level, unsigned int level, - int type) + unsigned int *starting_level, unsigned int *split_levels, + unsigned int level, int type) { struct acpi_subtable_header *res; unsigned int number_of_levels = *starting_level; @@ -149,7 +160,8 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr, resource++;
local_level = acpi_pptt_walk_cache(table_hdr, *starting_level, - res, &ret, level, type); + split_levels, res, &ret, + level, type); /* * we are looking for the max depth. Since its potentially * possible for a given node to have resources with differing @@ -165,29 +177,29 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr, }
/** - * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches + * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache + * levels and split cache levels (data/instruction). * @table_hdr: Pointer to the head of the PPTT table * @cpu_node: processor node we wish to count caches for + * @levels: Number of levels if success. + * @split_levels: Number of split cache levels (data/instruction) if + * success. Can by NULL. * * Given a processor node containing a processing unit, walk into it and count * how many levels exist solely for it, and then walk up each level until we hit * the root node (ignore the package level because it may be possible to have - * caches that exist across packages). Count the number of cache levels that - * exist at each level on the way up. - * - * Return: Total number of levels found. + * caches that exist across packages). Count the number of cache levels and + * split cache levels (data/instruction) that exist at each level on the way + * up. */ -static int acpi_count_levels(struct acpi_table_header *table_hdr, - struct acpi_pptt_processor *cpu_node) +static void acpi_count_levels(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + unsigned int *levels, unsigned int *split_levels) { - int total_levels = 0; - do { - acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0); + acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0); cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); } while (cpu_node); - - return total_levels; }
/** @@ -326,7 +338,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
while (cpu_node && !found) { found = acpi_find_cache_level(table_hdr, cpu_node, - &total_levels, level, acpi_type); + &total_levels, NULL, level, acpi_type); *node = cpu_node; cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); } @@ -597,36 +609,48 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag) }
/** - * acpi_find_last_cache_level() - Determines the number of cache levels for a PE + * acpi_get_cache_info() - Determine the number of cache levels and + * split cache levels (data/instruction) and for a PE. * @cpu: Kernel logical CPU number + * @levels: Number of levels if success. + * @split_levels: Number of levels being split (i.e. data/instruction) + * if success. Can by NULL. * * Given a logical CPU number, returns the number of levels of cache represented * in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0 * indicating we didn't find any cache levels. * - * Return: Cache levels visible to this core. + * Return: -ENOENT if no PPTT table or no PPTT processor struct found. + * 0 on success. */ -int acpi_find_last_cache_level(unsigned int cpu) +int acpi_get_cache_info(unsigned int cpu, unsigned int *levels, + unsigned int *split_levels) { struct acpi_pptt_processor *cpu_node; struct acpi_table_header *table; - int number_of_levels = 0; u32 acpi_cpu_id;
+ *levels = 0; + if (split_levels) + *split_levels = 0; + table = acpi_get_pptt(); if (!table) return -ENOENT;
- pr_debug("Cache Setup find last level CPU=%d\n", cpu); + pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu); cpu_node = acpi_find_processor_node(table, acpi_cpu_id); - if (cpu_node) - number_of_levels = acpi_count_levels(table, cpu_node); + if (!cpu_node) + return -ENOENT;
- pr_debug("Cache Setup find last level level=%d\n", number_of_levels); + acpi_count_levels(table, cpu_node, levels, split_levels);
- return number_of_levels; + pr_debug("Cache Setup: last_level=%d split_levels=%d\n", + *levels, split_levels ? *split_levels : -1); + + return 0; }
/** diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index ff0328f3fbb0..00d8e7f9d1c6 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -88,19 +88,22 @@ bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y); int detect_cache_attributes(unsigned int cpu); #ifndef CONFIG_ACPI_PPTT /* - * acpi_find_last_cache_level is only called on ACPI enabled + * acpi_get_cache_info() is only called on ACPI enabled * platforms using the PPTT for topology. This means that if * the platform supports other firmware configuration methods * we need to stub out the call when ACPI is disabled. * ACPI enabled platforms not using PPTT won't be making calls * to this function so we need not worry about them. */ -static inline int acpi_find_last_cache_level(unsigned int cpu) +static inline +int acpi_get_cache_info(unsigned int cpu, + unsigned int *levels, unsigned int *split_levels) { return 0; } #else -int acpi_find_last_cache_level(unsigned int cpu); +int acpi_get_cache_info(unsigned int cpu, + unsigned int *levels, unsigned int *split_levels); #endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit 5944ce092b97caed5d86d961e963b883b5c44ee2 ]
commit 3fcbf1c77d08 ("arch_topology: Fix cache attributes detection in the
CPU hotplug path") adds a call to detect_cache_attributes() to populate the
cacheinfo before updating the siblings mask. detect_cache_attributes()
allocates memory and can take the PPTT mutex (on ACPI platforms). On
PREEMPT_RT kernels, on secondary CPUs, this triggers a
'BUG: sleeping function called from invalid context' [1], as the code is
executed with preemption and interrupts disabled.
The primary CPU was previously storing the cache information using the now removed (struct cpu_topology).llc_id: commit 5b8dc787ce4a ("arch_topology: Drop LLC identifier stash from the CPU topology")
allocate_cache_info() tries to build the cacheinfo from the primary CPU
prior to the secondary CPUs booting, if the DT/ACPI description contains
cache information. If allocate_cache_info() fails, then fall back to the
current state for the cacheinfo allocation. [1] will be triggered in such
a case.
When unplugging a CPU, the cacheinfo memory cannot be freed. If it was, then the memory would be allocated early by the re-plugged CPU and would trigger [1].
Note that populate_cache_leaves() might be called multiple times due to populate_leaves being moved up. This is required since detect_cache_attributes() might be called with per_cpu_cacheinfo(cpu) being allocated but not populated.
[1]:
| BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
| in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 0, name: swapper/111
| preempt_count: 1, expected: 0
| RCU nest depth: 1, expected: 1
| 3 locks held by swapper/111/0:
|  #0: (&pcp->lock){+.+.}-{3:3}, at: get_page_from_freelist+0x218/0x12c8
|  #1: (rcu_read_lock){....}-{1:3}, at: rt_spin_trylock+0x48/0xf0
|  #2: (&zone->lock){+.+.}-{3:3}, at: rmqueue_bulk+0x64/0xa80
| irq event stamp: 0
| hardirqs last enabled at (0): 0x0
| hardirqs last disabled at (0): copy_process+0x5dc/0x1ab8
| softirqs last enabled at (0): copy_process+0x5dc/0x1ab8
| softirqs last disabled at (0): 0x0
| Preemption disabled at:
| migrate_enable+0x30/0x130
| CPU: 111 PID: 0 Comm: swapper/111 Tainted: G W 6.0.0-rc4-rt6-[...]
| Call trace:
|  __kmalloc+0xbc/0x1e8
|  detect_cache_attributes+0x2d4/0x5f0
|  update_siblings_masks+0x30/0x368
|  store_cpu_topology+0x78/0xb8
|  secondary_start_kernel+0xd0/0x198
|  __secondary_switched+0xb0/0xb4
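Roughly, the split introduced here between the early sleepable phase on the
primary CPU and the atomic secondary-CPU phase can be sketched as the
standalone model below. This is not the kernel code: the firmware parsing
and the GFP_ATOMIC fallback allocation are elided, and the model_* names are
illustrative stand-ins for fetch_cache_info()/detect_cache_attributes().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

/* Per-CPU cacheinfo pointers: allocated once, never freed on unplug,
 * so a re-plugged CPU never has to allocate in atomic context. */
static void *percpu_cacheinfo[NR_CPUS];

/* Early phase (primary CPU, may sleep): parse DT/ACPI and allocate. */
static int model_fetch_cache_info(unsigned int cpu)
{
        unsigned int leaves = 4;        /* pretend firmware reported 4 leaves */

        percpu_cacheinfo[cpu] = calloc(leaves, 64);
        return percpu_cacheinfo[cpu] ? 0 : -ENOMEM;
}

/* Hotplug phase (preemption disabled): skip the allocation already done
 * by the early phase and only (re)populate the leaves. */
static int model_detect_cache_attributes(unsigned int cpu)
{
        if (!percpu_cacheinfo[cpu])
                return -ENOENT; /* fallback allocation not modelled here */
        /* populate_cache_leaves() + shared_cpu_map setup would run here */
        return 0;
}

int main(void)
{
        unsigned int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)     /* init_cpu_topology() */
                model_fetch_cache_info(cpu);
        for (cpu = 0; cpu < NR_CPUS; cpu++)     /* secondary CPU bring-up */
                printf("cpu%u: %d\n", cpu, model_detect_cache_attributes(cpu));
        return 0;
}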
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230104183033.755668-7-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 arch/riscv/kernel/cacheinfo.c |  5 ---
 drivers/base/arch_topology.c  | 12 +++++-
 drivers/base/cacheinfo.c      | 71 ++++++++++++++++++++++++++---------
 include/linux/cacheinfo.h     |  1 +
 4 files changed, 65 insertions(+), 24 deletions(-)
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 440a3df5944c..3a13113f1b29 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -113,11 +113,6 @@ static void fill_cacheinfo(struct cacheinfo **this_leaf, } }
-int init_cache_level(unsigned int cpu) -{ - return init_of_cache_level(cpu); -} - int populate_cache_leaves(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index e7d6e6657ffa..b1c1dd38ab01 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -736,7 +736,7 @@ void update_siblings_masks(unsigned int cpuid)
ret = detect_cache_attributes(cpuid); if (ret && ret != -ENOENT) - pr_info("Early cacheinfo failed, ret = %d\n", ret); + pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
/* update core and thread sibling masks */ for_each_online_cpu(cpu) { @@ -825,7 +825,7 @@ __weak int __init parse_acpi_topology(void) #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) void __init init_cpu_topology(void) { - int ret; + int cpu, ret;
reset_cpu_topology(); ret = parse_acpi_topology(); @@ -840,6 +840,14 @@ void __init init_cpu_topology(void) reset_cpu_topology(); return; } + + for_each_possible_cpu(cpu) { + ret = fetch_cache_info(cpu); + if (ret) { + pr_err("Early cacheinfo failed, ret = %d\n", ret); + break; + } + } }
void store_cpu_topology(unsigned int cpuid) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index ab99b0f0d010..cd943d06d074 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -412,10 +412,6 @@ static void free_cache_attributes(unsigned int cpu) return;
cache_shared_cpu_map_remove(cpu); - - kfree(per_cpu_cacheinfo(cpu)); - per_cpu_cacheinfo(cpu) = NULL; - cache_leaves(cpu) = 0; }
int __weak init_cache_level(unsigned int cpu) @@ -428,29 +424,71 @@ int __weak populate_cache_leaves(unsigned int cpu) return -ENOENT; }
+static inline +int allocate_cache_info(int cpu) +{ + per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), + sizeof(struct cacheinfo), GFP_ATOMIC); + if (!per_cpu_cacheinfo(cpu)) { + cache_leaves(cpu) = 0; + return -ENOMEM; + } + + return 0; +} + +int fetch_cache_info(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci; + unsigned int levels, split_levels; + int ret; + + if (acpi_disabled) { + ret = init_of_cache_level(cpu); + if (ret < 0) + return ret; + } else { + ret = acpi_get_cache_info(cpu, &levels, &split_levels); + if (ret < 0) + return ret; + + this_cpu_ci = get_cpu_cacheinfo(cpu); + this_cpu_ci->num_levels = levels; + /* + * This assumes that: + * - there cannot be any split caches (data/instruction) + * above a unified cache + * - data/instruction caches come by pair + */ + this_cpu_ci->num_leaves = levels + split_levels; + } + if (!cache_leaves(cpu)) + return -ENOENT; + + return allocate_cache_info(cpu); +} + int detect_cache_attributes(unsigned int cpu) { int ret;
- /* Since early detection of the cacheinfo is allowed via this - * function and this also gets called as CPU hotplug callbacks via - * cacheinfo_cpu_online, the initialisation can be skipped and only - * CPU maps can be updated as the CPU online status would be update - * if called via cacheinfo_cpu_online path. + /* Since early initialization/allocation of the cacheinfo is allowed + * via fetch_cache_info() and this also gets called as CPU hotplug + * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped + * as it will happen only once (the cacheinfo memory is never freed). + * Just populate the cacheinfo. */ if (per_cpu_cacheinfo(cpu)) - goto update_cpu_map; + goto populate_leaves;
if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT;
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), - sizeof(struct cacheinfo), GFP_ATOMIC); - if (per_cpu_cacheinfo(cpu) == NULL) { - cache_leaves(cpu) = 0; - return -ENOMEM; - } + ret = allocate_cache_info(cpu); + if (ret) + return ret;
+populate_leaves: /* * populate_cache_leaves() may completely setup the cache leaves and * shared_cpu_map or it may leave it partially setup. @@ -459,7 +497,6 @@ int detect_cache_attributes(unsigned int cpu) if (ret) goto free_ci;
-update_cpu_map: /* * For systems using DT for cache hierarchy, fw_token * and shared_cpu_map will be set up here only if they are diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 00d8e7f9d1c6..dfef57077cd0 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -85,6 +85,7 @@ int populate_cache_leaves(unsigned int cpu); int cache_setup_acpi(unsigned int cpu); bool last_level_cache_is_valid(unsigned int cpu); bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y); +int fetch_cache_info(unsigned int cpu); int detect_cache_attributes(unsigned int cpu); #ifndef CONFIG_ACPI_PPTT /*
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit ecaef469920fd6d2c7687f19081946f47684a423 ]
Set potentially uninitialized variables to 0. This is particularly relevant when CONFIG_ACPI_PPTT is not set.
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/all/202301052307.JYt1GWaJ-lkp@intel.com/
Reported-by: Dan Carpenter <error27@gmail.com>
Link: https://lore.kernel.org/all/Y86iruJPuwNN7rZw@kili/
Fixes: 5944ce092b97 ("arch_topology: Build cacheinfo from primary CPU")
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Link: https://lore.kernel.org/r/20230124154053.355376-2-pierre.gondois@arm.com
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 drivers/base/cacheinfo.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cd943d06d074..26ae36f2ff58 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -440,7 +440,7 @@ int allocate_cache_info(int cpu)
 int fetch_cache_info(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci;
-	unsigned int levels, split_levels;
+	unsigned int levels = 0, split_levels = 0;
 	int ret;
 
 	if (acpi_disabled) {
From: Yicong Yang <yangyicong@hisilicon.com>
[ Upstream commit 5c2712387d4850e0b64121d5fd3e6c4e84ea3266 ]
After entering 6.3-rc1 the LLC cacheinfo is not exported on our ACPI-based
arm64 server. This is because the LLC cacheinfo is partly reset when
secondary CPUs boot up. On arm64 the primary CPU will allocate and set up
the cacheinfo:

init_cpu_topology()
  for_each_possible_cpu()
    fetch_cache_info()          // Allocate cacheinfo and init levels
detect_cache_attributes()
  cache_shared_cpu_map_setup()
    if (!last_level_cache_is_valid())   // not valid, setup LLC
      cache_setup_properties()          // setup LLC
On secondary CPU boot up:

detect_cache_attributes()
  populate_cache_leaves()
    get_cache_type()            // Get cache type from clidr_el1,
                                // for LLC type=CACHE_TYPE_NOCACHE
  cache_shared_cpu_map_setup()
    if (!last_level_cache_is_valid())
                                // Valid and won't go to this branch,
                                // leave LLC's type=CACHE_TYPE_NOCACHE
last_level_cache_is_valid() uses cacheinfo->{attributes, fw_token} to test
whether the LLC is valid, but populate_cache_leaves() only resets the LLC's
type. So we never try to re-set up the LLC: it is left as CACHE_TYPE_NOCACHE
and is not exported through sysfs.
This patch tries to fix this by not re-populating the cache leaves if the LLC is valid.
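The failure mode and the guard can be modelled in a few lines of standalone
C. This is only a sketch of the mechanism described above, not the kernel
code: validity is judged on attributes/fw_token (represented by fw_valid)
while populate only touches the type, as on arm64, and the model_* names are
invented for the example.

#include <stdbool.h>
#include <stdio.h>

enum cache_type { CACHE_TYPE_NOCACHE, CACHE_TYPE_UNIFIED };

/* Simplified LLC leaf: 'fw_valid' stands in for the attributes/fw_token
 * checks done by last_level_cache_is_valid(). */
struct model_llc {
        enum cache_type type;
        bool fw_valid;
};

/* arm64 populate_cache_leaves(): types come from CLIDR_EL1, which does
 * not describe the firmware-only LLC, so its type becomes NOCACHE while
 * the attributes set up by the primary CPU stay untouched. */
static void model_populate_cache_leaves(struct model_llc *llc)
{
        llc->type = CACHE_TYPE_NOCACHE;
}

/* Secondary-CPU path of detect_cache_attributes(), without/with the
 * guard added by this patch. */
static void model_secondary_detect(struct model_llc *llc, bool with_fix)
{
        if (!with_fix || !llc->fw_valid)
                model_populate_cache_leaves(llc);
        /* cache_shared_cpu_map_setup() then sees a "valid" LLC either
         * way, so the NOCACHE type written above is never corrected. */
}

int main(void)
{
        /* The primary CPU set the LLC up from PPTT/DT: unified + valid. */
        struct model_llc before = { CACHE_TYPE_UNIFIED, true };
        struct model_llc after  = { CACHE_TYPE_UNIFIED, true };

        model_secondary_detect(&before, false);
        model_secondary_detect(&after, true);
        printf("without fix: LLC exported: %s\n",
               before.type == CACHE_TYPE_NOCACHE ? "no" : "yes");
        printf("with fix:    LLC exported: %s\n",
               after.type == CACHE_TYPE_NOCACHE ? "no" : "yes");
        return 0;
}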
Fixes: 5944ce092b97 ("arch_topology: Build cacheinfo from primary CPU")
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Reviewed-by: Pierre Gondois <pierre.gondois@arm.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://lore.kernel.org/r/20230328114915.33340-1-yangyicong@huawei.com
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 drivers/base/cacheinfo.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 26ae36f2ff58..60ecf7cc0250 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -490,12 +490,18 @@ int detect_cache_attributes(unsigned int cpu)
 
 populate_leaves:
 	/*
-	 * populate_cache_leaves() may completely setup the cache leaves and
-	 * shared_cpu_map or it may leave it partially setup.
+	 * If LLC is valid the cache leaves were already populated so just go to
+	 * update the cpu map.
 	 */
-	ret = populate_cache_leaves(cpu);
-	if (ret)
-		goto free_ci;
+	if (!last_level_cache_is_valid(cpu)) {
+		/*
+		 * populate_cache_leaves() may completely setup the cache leaves and
+		 * shared_cpu_map or it may leave it partially setup.
+		 */
+		ret = populate_cache_leaves(cpu);
+		if (ret)
+			goto free_ci;
+	}
 
 	/*
 	 * For systems using DT for cache hierarchy, fw_token
From: K Prateek Nayak <kprateek.nayak@amd.com>
[ Upstream commit c26fabe73330d983c7ce822c6b6ec0879b4da61f ]
Until commit 5c2712387d48 ("cacheinfo: Fix LLC is not exported through
sysfs"), cacheinfo called populate_cache_leaves() for a CPU coming online,
which let the arch-specific functions handle (at least on x86) populating
the shared_cpu_map. However, with the changes in the aforementioned commit,
populate_cache_leaves() is no longer called when a CPU comes online as a
result of hotplug, since last_level_cache_is_valid() returns true as the
cacheinfo data is not discarded. The CPU coming online is not present in
the shared_cpu_map; however, it will not be added, since the
cpu_cacheinfo->cpu_map_populated flag is set (it is set in
populate_cache_leaves() when cacheinfo is first populated, for x86).
This can lead to inconsistencies in the shared_cpu_map when an offlined CPU
comes online again. The example below depicts the inconsistency in the
shared_cpu_list in cacheinfo when CPU8 is offlined and onlined again on a
3rd Generation EPYC processor:
  # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done
  /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143
  # echo 0 > /sys/devices/system/cpu/cpu8/online
  # echo 1 > /sys/devices/system/cpu/cpu8/online
  # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done
  /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8
  /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8
  /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8
  /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8
  # cat /sys/devices/system/cpu/cpu136/cache/index0/shared_cpu_list
  136

  # cat /sys/devices/system/cpu/cpu136/cache/index3/shared_cpu_list
  9-15,136-143
Clear the flag when the CPU is removed from the shared_cpu_map in
cache_shared_cpu_map_remove() during CPU hotplug. This allows
cache_shared_cpu_map_setup() to add the CPU coming back online to the
shared_cpu_map. Set the flag again when the shared_cpu_map is set up. The
following are the results of performing the same test as described above
with the changes:
  # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done
  /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143
  # echo 0 > /sys/devices/system/cpu/cpu8/online
  # echo 1 > /sys/devices/system/cpu/cpu8/online
  # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done
  /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136
  /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143
  # cat /sys/devices/system/cpu/cpu136/cache/index0/shared_cpu_list
  8,136

  # cat /sys/devices/system/cpu/cpu136/cache/index3/shared_cpu_list
  8-15,136-143
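A compressed, standalone model of the flag lifecycle across a hotplug cycle
(illustrative only, not the kernel code; it assumes, as described above,
that cache_shared_cpu_map_setup() skips rebuilding the map when
cpu_map_populated is already set):

#include <stdbool.h>
#include <stdio.h>

/* Simplified per-CPU state. */
struct model_cpu_ci {
        bool cpu_map_populated;
        bool in_shared_map;
};

static void model_shared_map_setup(struct model_cpu_ci *ci)
{
        if (ci->cpu_map_populated)
                return;                 /* skipped: map assumed up to date */
        ci->in_shared_map = true;
        ci->cpu_map_populated = true;   /* set again once the map is built */
}

static void model_shared_map_remove(struct model_cpu_ci *ci, bool with_fix)
{
        ci->in_shared_map = false;
        if (with_fix)
                ci->cpu_map_populated = false;  /* the fix: clear on removal */
}

int main(void)
{
        struct model_cpu_ci cpu8 = { 0 };

        model_shared_map_setup(&cpu8);          /* boot */
        model_shared_map_remove(&cpu8, true);   /* echo 0 > online */
        model_shared_map_setup(&cpu8);          /* echo 1 > online */
        printf("cpu8 back in shared map: %s\n",
               cpu8.in_shared_map ? "yes" : "no (stale flag)");
        return 0;
}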
Fixes: 5c2712387d48 ("cacheinfo: Fix LLC is not exported through sysfs")
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Reviewed-by: Yicong Yang <yangyicong@hisilicon.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://lore.kernel.org/r/20230508084115.1157-3-kprateek.nayak@amd.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 drivers/base/cacheinfo.c | 6 ++++++
 1 file changed, 6 insertions(+)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 60ecf7cc0250..9e11d42b0d64 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -365,11 +365,14 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 			coherency_max_size = this_leaf->coherency_line_size;
 	}
 
+	/* shared_cpu_map is now populated for the cpu */
+	this_cpu_ci->cpu_map_populated = true;
 	return 0;
 }
 
 static void cache_shared_cpu_map_remove(unsigned int cpu)
 {
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf, *sib_leaf;
 	unsigned int sibling, index, sib_index;
 
@@ -404,6 +407,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 		if (of_have_populated_dt())
 			of_node_put(this_leaf->fw_token);
 	}
+
+	/* cpu is no longer populated in the shared map */
+	this_cpu_ci->cpu_map_populated = false;
 }
 
 static void free_cache_attributes(unsigned int cpu)
From: Pierre Gondois <pierre.gondois@arm.com>
[ Upstream commit 27f1568b1d5fe35014074f92717b250afbe67031 ]
The DeviceTree Specification v0.3 specifies that the cache node 'compatible'
and 'cache-level' properties are 'required', cf. s3.8 'Multi-level and
Shared Cache Nodes'. The 'cache-unified' property should be present if one
of the properties for a unified cache is present ('cache-size', ...).
Update the Device Trees accordingly.
Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: Wen Yang <wen.yang@linux.dev>
---
 arch/arm64/boot/dts/nvidia/tegra194.dtsi | 15 +++++++++++
 arch/arm64/boot/dts/nvidia/tegra210.dtsi |  1 +
 arch/arm64/boot/dts/nvidia/tegra234.dtsi | 33 ++++++++++++++++++++++++
 3 files changed, 49 insertions(+)
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 41f3a7e188d0..ed2a534dcfd6 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -3029,36 +3029,51 @@ core1 { };
l2c_0: l2-cache0 { + compatible = "cache"; + cache-unified; cache-size = <2097152>; cache-line-size = <64>; cache-sets = <2048>; + cache-level = <2>; next-level-cache = <&l3c>; };
l2c_1: l2-cache1 { + compatible = "cache"; + cache-unified; cache-size = <2097152>; cache-line-size = <64>; cache-sets = <2048>; + cache-level = <2>; next-level-cache = <&l3c>; };
l2c_2: l2-cache2 { + compatible = "cache"; + cache-unified; cache-size = <2097152>; cache-line-size = <64>; cache-sets = <2048>; + cache-level = <2>; next-level-cache = <&l3c>; };
l2c_3: l2-cache3 { + compatible = "cache"; + cache-unified; cache-size = <2097152>; cache-line-size = <64>; cache-sets = <2048>; + cache-level = <2>; next-level-cache = <&l3c>; };
l3c: l3-cache { + compatible = "cache"; + cache-unified; cache-size = <4194304>; cache-line-size = <64>; + cache-level = <3>; cache-sets = <4096>; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi index 724e87450605..9474b0da0a3e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi @@ -2005,6 +2005,7 @@ CPU_SLEEP: cpu-sleep {
L2: l2-cache { compatible = "cache"; + cache-level = <2>; }; };
diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi index 876c73a00a54..e09a4fd8364c 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi @@ -2907,117 +2907,150 @@ core3 { };
l2c0_0: l2-cache00 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c0>; };
l2c0_1: l2-cache01 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c0>; };
l2c0_2: l2-cache02 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c0>; };
l2c0_3: l2-cache03 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c0>; };
l2c1_0: l2-cache10 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c1>; };
l2c1_1: l2-cache11 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c1>; };
l2c1_2: l2-cache12 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c1>; };
l2c1_3: l2-cache13 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c1>; };
l2c2_0: l2-cache20 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c2>; };
l2c2_1: l2-cache21 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c2>; };
l2c2_2: l2-cache22 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c2>; };
l2c2_3: l2-cache23 { + compatible = "cache"; cache-size = <262144>; cache-line-size = <64>; cache-sets = <512>; cache-unified; + cache-level = <2>; next-level-cache = <&l3c2>; };
l3c0: l3-cache0 { + compatible = "cache"; + cache-unified; cache-size = <2097152>; cache-line-size = <64>; cache-sets = <2048>; + cache-level = <3>; };
l3c1: l3-cache1 { + compatible = "cache"; + cache-unified; cache-size = <2097152>; cache-line-size = <64>; cache-sets = <2048>; + cache-level = <3>; };
l3c2: l3-cache2 { + compatible = "cache"; + cache-unified; cache-size = <2097152>; cache-line-size = <64>; cache-sets = <2048>; + cache-level = <3>; }; };