
Commit 0ebf7d1

Author: Henrik Lindblom
cache: stm32: add cortex-m33 peripheral driver
STM32 Cortex-M33 devices, such as the U5xxx series, have ICACHE and DCACHE peripherals for instruction and data caching which are not part of the Cortex-M33 architecture spec.

Issue #71268

Signed-off-by: Henrik Lindblom <[email protected]>
1 parent a29c0af commit 0ebf7d1

4 files changed (+173, −0)


drivers/cache/CMakeLists.txt (+1)

@@ -10,3 +10,4 @@ zephyr_library_sources_ifdef(CONFIG_CACHE_ANDES cache_andes.c)
 zephyr_library_sources_ifdef(CONFIG_USERSPACE cache_handlers.c)
 zephyr_library_sources_ifdef(CONFIG_CACHE_NRF_CACHE cache_nrf.c)
 zephyr_library_sources_ifdef(CONFIG_CACHE_NXP_XCACHE cache_nxp_xcache.c)
+zephyr_library_sources_ifdef(CONFIG_CACHE_STM32 cache_stm32.c)

drivers/cache/Kconfig (+1)

@@ -22,5 +22,6 @@ source "drivers/cache/Kconfig.aspeed"
 source "drivers/cache/Kconfig.nrf"
 source "drivers/cache/Kconfig.andes"
 source "drivers/cache/Kconfig.nxp_xcache"
+source "drivers/cache/Kconfig.stm32"
 
 endif # CACHE

drivers/cache/Kconfig.stm32 (+9)

@@ -0,0 +1,9 @@
+# Copyright (c) 2025 Henrik Lindblom <[email protected]>
+# SPDX-License-Identifier: Apache-2.0
+
+config CACHE_STM32
+	bool "STM32 cache driver"
+	select CACHE_HAS_DRIVER
+	depends on CACHE_MANAGEMENT
+	help
+	  Enable support for the STM32 ICACHE / DCACHE peripheral present in some Cortex-M33 chips.
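
Usage note (not part of this commit): since the new option depends on CACHE_MANAGEMENT, an application would enable both symbols in its configuration, e.g. in a hypothetical prj.conf fragment:

    CONFIG_CACHE_MANAGEMENT=y
    CONFIG_CACHE_STM32=y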

drivers/cache/cache_stm32.c (+162)

@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2025 Henrik Lindblom <[email protected]>
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+#include <zephyr/kernel.h>
+#include <zephyr/drivers/cache.h>
+#include <zephyr/logging/log.h>
+#include <zephyr/sys/math_extras.h>
+#include <stm32_ll_dcache.h>
+#include <stm32_ll_icache.h>
+
+LOG_MODULE_REGISTER(cache_stm32, CONFIG_CACHE_LOG_LEVEL);
+
+static inline void wait_for_icache(void)
+{
+	while (LL_ICACHE_IsActiveFlag_BUSY()) {
+	}
+}
+
+void cache_data_enable(void)
+{
+	LL_DCACHE_Enable(DCACHE1);
+#if defined(DCACHE2)
+	LL_DCACHE_Enable(DCACHE2);
+#endif
+}
+
+void cache_data_disable(void)
+{
+	LL_DCACHE_Disable(DCACHE1);
+#if defined(DCACHE2)
+	LL_DCACHE_Disable(DCACHE2);
+#endif
+}
+
+int cache_data_flush_all(void)
+{
+	return -ENOTSUP;
+}
+
+int cache_data_invd_all(void)
+{
+	LL_DCACHE_Invalidate(DCACHE1);
+#if defined(DCACHE2)
+	LL_DCACHE_Invalidate(DCACHE2);
+#endif
+	return 0;
+}
+
+int cache_data_flush_and_invd_all(void)
+{
+	int rc = cache_data_flush_all();
+
+	if (rc) {
+		return rc;
+	}
+
+	return cache_data_invd_all();
+}
+
+static int cache_data_manage_range(void *addr, size_t size, uint32_t command)
+{
+	/*
+	 * This is a simple approach to invalidate the range. The address might be in either DCACHE1
+	 * or DCACHE2 (if present). The cache invalidation algorithm checks the TAG memory for the
+	 * specified address range so there's little harm in just checking both caches.
+	 */
+	uint32_t start = (uint32_t)addr;
+	uint32_t end = start;
+
+	if (u32_add_overflow(start, size, &end)) {
+		return -EOVERFLOW;
+	}
+
+	LL_DCACHE_SetStartAddress(DCACHE1, start);
+	LL_DCACHE_SetEndAddress(DCACHE1, end);
+	LL_DCACHE_SetCommand(DCACHE1, command);
+	LL_DCACHE_StartCommand(DCACHE1);
+#if defined(DCACHE2)
+	LL_DCACHE_SetStartAddress(DCACHE2, start);
+	LL_DCACHE_SetEndAddress(DCACHE2, end);
+	LL_DCACHE_SetCommand(DCACHE2, command);
+	LL_DCACHE_StartCommand(DCACHE2);
+#endif
+	return 0;
+}
+
+int cache_data_flush_range(void *addr, size_t size)
+{
+	return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_CLEAN_BY_ADDR);
+}
+
+int cache_data_invd_range(void *addr, size_t size)
+{
+	return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_INVALIDATE_BY_ADDR);
+}
+
+int cache_data_flush_and_invd_range(void *addr, size_t size)
+{
+	return cache_data_manage_range(addr, size, LL_DCACHE_COMMAND_CLEAN_INVALIDATE_BY_ADDR);
+}
+
+void cache_instr_enable(void)
+{
+	/*
+	 * Need to wait until any pending cache invalidation operations finish.
+	 */
+	wait_for_icache();
+	LL_ICACHE_Enable();
+}
+
+void cache_instr_disable(void)
+{
+	LL_ICACHE_Disable();
+}
+
+int cache_instr_flush_all(void)
+{
+	return -ENOTSUP;
+}
+
+int cache_instr_invd_all(void)
+{
+	LL_ICACHE_Invalidate();
+	return 0;
+}
+
+int cache_instr_flush_and_invd_all(void)
+{
+	int rc = cache_instr_flush_all();
+
+	if (rc) {
+		return rc;
+	}
+	return cache_instr_invd_all();
+}
+
+int cache_instr_flush_range(void *addr, size_t size)
+{
+	ARG_UNUSED(addr);
+	ARG_UNUSED(size);
+	return -ENOTSUP;
+}
+
+int cache_instr_invd_range(void *addr, size_t size)
+{
+	ARG_UNUSED(addr);
+	ARG_UNUSED(size);
+	return -ENOTSUP;
+}
+
+int cache_instr_flush_and_invd_range(void *addr, size_t size)
+{
+	int rc = cache_instr_flush_range(addr, size);
+
+	if (rc) {
+		return rc;
+	}
+
+	return cache_instr_invd_range(addr, size);
+}
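
Usage sketch (not part of this commit): with CONFIG_CACHE_MANAGEMENT and CONFIG_CACHE_STM32 enabled, the generic sys_cache_* wrappers in <zephyr/cache.h> are expected to dispatch to the cache_data_*/cache_instr_* hooks implemented above. A hypothetical DMA buffer scenario might look like the following; the buffer and function names are illustrative only:

    #include <zephyr/kernel.h>
    #include <zephyr/cache.h>

    /* Hypothetical buffer shared with a DMA peripheral; name and size are illustrative. */
    static uint8_t rx_buf[256];

    void handle_dma_buffer(void)
    {
            /* CPU is about to read data the DMA wrote to memory: drop stale cache
             * lines covering the buffer (expected to end up in cache_data_invd_range(),
             * i.e. an INVALIDATE_BY_ADDR command on DCACHE1/DCACHE2).
             */
            sys_cache_data_invd_range(rx_buf, sizeof(rx_buf));

            /* ... consume rx_buf ... */

            /* CPU wrote data the DMA will read next: push dirty lines back to memory
             * (expected to end up in cache_data_flush_range(), i.e. a CLEAN_BY_ADDR command).
             */
            sys_cache_data_flush_range(rx_buf, sizeof(rx_buf));
    }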
