
Commit 518580a

Author: Henrik Lindblom
cache: stm32: add cortex-m33 peripheral driver
STM32 Cortex-M33 SoCs, such as the L5/H5/U5 series, have a cache peripheral for instruction and data caches, which is not present in the Cortex-M33 architecture spec.

The driver defaults to direct-mapped cache, as it uses less power than the alternative set-associative mapping [1]. This has also been the default in the STM32 SoC initialization code for chips that have the ICACHE peripheral, which makes it the safest choice for backward compatibility. The exception to the rule is STM32L5, which has the n-way cache mode selected in SoC code.

[1]: https://en.wikipedia.org/wiki/Cache_placement_policies

Signed-off-by: Henrik Lindblom <[email protected]>
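As a rough usage sketch (assuming the Kconfig symbols introduced in this commit and Zephyr's existing CACHE_MANAGEMENT option), an application enables the driver from its prj.conf:

CONFIG_CACHE_MANAGEMENT=y
CONFIG_CACHE_STM32=y

The data-cache part of the driver is additionally guarded by CONFIG_DCACHE, so it is only compiled for SoCs that enable the data cache.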
1 parent 90cd350 commit 518580a

4 files changed (+208, -0)


drivers/cache/CMakeLists.txt (+1)

@@ -10,3 +10,4 @@ zephyr_library_sources_ifdef(CONFIG_CACHE_ANDES cache_andes.c)
 zephyr_library_sources_ifdef(CONFIG_USERSPACE cache_handlers.c)
 zephyr_library_sources_ifdef(CONFIG_CACHE_NRF_CACHE cache_nrf.c)
 zephyr_library_sources_ifdef(CONFIG_CACHE_NXP_XCACHE cache_nxp_xcache.c)
+zephyr_library_sources_ifdef(CONFIG_CACHE_STM32 cache_stm32.c)

drivers/cache/Kconfig (+1)

@@ -22,5 +22,6 @@ source "drivers/cache/Kconfig.aspeed"
 source "drivers/cache/Kconfig.nrf"
 source "drivers/cache/Kconfig.andes"
 source "drivers/cache/Kconfig.nxp_xcache"
+source "drivers/cache/Kconfig.stm32"

 endif # CACHE

drivers/cache/Kconfig.stm32 (new file, +24)

# Copyright (c) 2025 Henrik Lindblom <[email protected]>
# SPDX-License-Identifier: Apache-2.0

menuconfig CACHE_STM32
        bool "STM32 cache driver"
        select CACHE_HAS_DRIVER
        depends on CACHE_MANAGEMENT
        help
          Enable support for the STM32 ICACHE / DCACHE peripherals present in some STM32 chips.

if CACHE_STM32

# "default n" for L5 is legacy - could be removed?
config CACHE_STM32_ICACHE_DIRECT_MAPPING
        bool "Use 1-way associative mapping for ICACHE"
        default n if SOC_SERIES_STM32L5X
        default y
        help
          Use ICACHE in direct mapping (1-way associative) mode instead of the default n-way
          associative cache mode.

          This option reduces power consumption but slightly reduces the cache's performance.

endif # CACHE_STM32
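The CACHE_STM32_ICACHE_DIRECT_MAPPING option above trades some hit rate for lower power. As a sketch, an application that prefers the n-way associative mode can override the default in its prj.conf (assuming CACHE_STM32 is already enabled):

CONFIG_CACHE_STM32_ICACHE_DIRECT_MAPPING=n

With the option left at its default of y, cache_instr_enable() below switches the ICACHE to 1-way mode before enabling it.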

drivers/cache/cache_stm32.c (new file, +182)

/*
 * Copyright (c) 2025 Henrik Lindblom <[email protected]>
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/kernel.h>
#include <zephyr/drivers/cache.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/math_extras.h>
#include <stm32_ll_dcache.h>
#include <stm32_ll_icache.h>

LOG_MODULE_REGISTER(cache_stm32, CONFIG_CACHE_LOG_LEVEL);

#ifdef CONFIG_DCACHE

void cache_data_enable(void)
{
        LL_DCACHE_Enable(DCACHE1);
#if defined(DCACHE2)
        LL_DCACHE_Enable(DCACHE2);
#endif
}

void cache_data_disable(void)
{
        cache_data_flush_all();

        while (LL_DCACHE_IsActiveFlag_BUSYCMD(DCACHE1)) {
        }

        LL_DCACHE_Disable(DCACHE1);
        LL_DCACHE_ClearFlag_BSYEND(DCACHE1);

#if defined(DCACHE2)
        while (LL_DCACHE_IsActiveFlag_BUSYCMD(DCACHE2)) {
        }

        LL_DCACHE_Disable(DCACHE2);
        LL_DCACHE_ClearFlag_BSYEND(DCACHE2);
#endif
}

static int cache_data_manage_range(void *addr, size_t size, uint32_t command)
{
        /*
         * This is a simple approach to invalidate the range. The address might be in either DCACHE1
         * or DCACHE2 (if present). The cache invalidation algorithm checks the TAG memory for the
         * specified address range so there's little harm in just checking both caches.
         */
        uint32_t start = (uint32_t)addr;
        uint32_t end;

        if (u32_add_overflow(start, size, &end)) {
                return -EOVERFLOW;
        }

        LL_DCACHE_SetStartAddress(DCACHE1, start);
        LL_DCACHE_SetEndAddress(DCACHE1, end);
        LL_DCACHE_SetCommand(DCACHE1, command);
        LL_DCACHE_StartCommand(DCACHE1);
#if defined(DCACHE2)
        LL_DCACHE_SetStartAddress(DCACHE2, start);
        LL_DCACHE_SetEndAddress(DCACHE2, end);
        LL_DCACHE_SetCommand(DCACHE2, command);
        LL_DCACHE_StartCommand(DCACHE2);
#endif
        return 0;
}

int cache_data_flush_range(void *addr, size_t size)
{
        return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_CLEAN_BY_ADDR);
}

int cache_data_invd_range(void *addr, size_t size)
{
        return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_INVALIDATE_BY_ADDR);
}

int cache_data_flush_and_invd_range(void *addr, size_t size)
{
        return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_CLEAN_INVALIDATE_BY_ADDR);
}

int cache_data_flush_all(void)
{
        return cache_data_flush_range(0, UINT32_MAX);
}

int cache_data_invd_all(void)
{
        LL_DCACHE_Invalidate(DCACHE1);
#if defined(DCACHE2)
        LL_DCACHE_Invalidate(DCACHE2);
#endif
        return 0;
}

int cache_data_flush_and_invd_all(void)
{
        return cache_data_flush_and_invd_range(0, UINT32_MAX);
}

#endif /* CONFIG_DCACHE */

static inline void wait_for_icache(void)
{
        while (LL_ICACHE_IsActiveFlag_BUSY()) {
        }

        /* Clear BSYEND to avoid an extra interrupt if somebody enables them. */
        LL_ICACHE_ClearFlag_BSYEND();
}

void cache_instr_enable(void)
{
        if (IS_ENABLED(CONFIG_CACHE_STM32_ICACHE_DIRECT_MAPPING)) {
                LL_ICACHE_SetMode(LL_ICACHE_1WAY);
        }

        /*
         * Need to wait until any pending cache invalidation operations finish. This is recommended
         * in the reference manual to ensure execution timing determinism.
         */
        wait_for_icache();
        LL_ICACHE_Enable();
}

void cache_instr_disable(void)
{
        LL_ICACHE_Disable();

        while (LL_ICACHE_IsEnabled()) {
                /*
                 * Wait until the ICACHE is disabled (CR.EN=0), at which point
                 * all requests bypass the cache and are forwarded directly
                 * from the ICACHE slave port to the ICACHE master port(s).
                 *
                 * The cache invalidation will start once disabled, but we allow
                 * it to proceed in the background since it doesn't need to be
                 * complete for requests to bypass the ICACHE.
                 */
        }
}

int cache_instr_flush_all(void)
{
        return -ENOTSUP;
}

int cache_instr_invd_all(void)
{
        LL_ICACHE_Invalidate();
        return 0;
}

int cache_instr_flush_and_invd_all(void)
{
        return -ENOTSUP;
}

int cache_instr_flush_range(void *addr, size_t size)
{
        ARG_UNUSED(addr);
        ARG_UNUSED(size);
        return -ENOTSUP;
}

int cache_instr_invd_range(void *addr, size_t size)
{
        ARG_UNUSED(addr);
        ARG_UNUSED(size);
        return -ENOTSUP;
}

int cache_instr_flush_and_invd_range(void *addr, size_t size)
{
        ARG_UNUSED(addr);
        ARG_UNUSED(size);
        return -ENOTSUP;
}
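For reference, a hypothetical caller-side sketch (not part of this commit) of how the driver is exercised through Zephyr's generic sys_cache_* wrappers from <zephyr/cache.h>, which dispatch to the cache_data_* / cache_instr_* hooks implemented above when CONFIG_CACHE_MANAGEMENT and this driver are enabled. The buffer name, size, and alignment are illustrative only; the required alignment depends on the device's cache line size.

#include <string.h>
#include <zephyr/cache.h>
#include <zephyr/kernel.h>

/* Hypothetical DMA buffer; the alignment is illustrative, not a documented requirement. */
static uint8_t dma_buf[256] __aligned(32);

void dma_cache_example(void)
{
        /* Enable the caches; on many STM32 targets the SoC init code has done this already. */
        sys_cache_instr_enable();
        sys_cache_data_enable();

        /* CPU fills the buffer, then cleans it so a DMA master sees the written data. */
        memset(dma_buf, 0xA5, sizeof(dma_buf));
        sys_cache_data_flush_range(dma_buf, sizeof(dma_buf));

        /* ... start a peripheral-to-memory DMA transfer into dma_buf here (omitted) ... */

        /* Drop stale cached copies before the CPU reads what the DMA master wrote. */
        sys_cache_data_invd_range(dma_buf, sizeof(dma_buf));
}

The range calls above land in cache_data_flush_range() / cache_data_invd_range(), which program both DCACHE1 and, when present, DCACHE2 as described in the cache_data_manage_range() comment.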
