Skip to content

Commit c30700d

Browse files
author
Christoph Hellwig
committed
dma-direct: provide generic support for uncached kernel segments
A few architectures support uncached kernel segments. In that case we get an uncached mapping for a given physical address by using an offset in the uncached segment. Implement support for this scheme in the generic dma-direct code instead of duplicating it in arch hooks. Signed-off-by: Christoph Hellwig <[email protected]>
1 parent 67f30ad commit c30700d

File tree

3 files changed

+26
-2
lines changed

3 files changed

+26
-2
lines changed

arch/Kconfig

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -260,6 +260,14 @@ config ARCH_HAS_SET_MEMORY
260260
config ARCH_HAS_SET_DIRECT_MAP
261261
bool
262262

263+
#
264+
# Select if arch has an uncached kernel segment and provides the
265+
# uncached_kernel_address / cached_kernel_address symbols to use it
266+
#
267+
config ARCH_HAS_UNCACHED_SEGMENT
268+
select ARCH_HAS_DMA_PREP_COHERENT
269+
bool
270+
263271
# Select if arch init_task must go in the __init_task_data section
264272
config ARCH_TASK_STRUCT_ON_STACK
265273
bool

include/linux/dma-noncoherent.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,4 +80,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
8080
}
8181
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
8282

83+
void *uncached_kernel_address(void *addr);
84+
void *cached_kernel_address(void *addr);
85+
8386
#endif /* _LINUX_DMA_NONCOHERENT_H */

kernel/dma/direct.c

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -158,6 +158,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
158158
*dma_handle = phys_to_dma(dev, page_to_phys(page));
159159
}
160160
memset(ret, 0, size);
161+
162+
if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
163+
!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
164+
arch_dma_prep_coherent(page, size);
165+
ret = uncached_kernel_address(ret);
166+
}
167+
161168
return ret;
162169
}
163170

@@ -173,21 +180,27 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
173180

174181
if (force_dma_unencrypted())
175182
set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
183+
184+
if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
185+
!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT))
186+
cpu_addr = cached_kernel_address(cpu_addr);
176187
__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
177188
}
178189

179190
void *dma_direct_alloc(struct device *dev, size_t size,
180191
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
181192
{
182-
if (!dev_is_dma_coherent(dev))
193+
if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
194+
!dev_is_dma_coherent(dev))
183195
return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
184196
return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
185197
}
186198

187199
void dma_direct_free(struct device *dev, size_t size,
188200
void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
189201
{
190-
if (!dev_is_dma_coherent(dev))
202+
if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
203+
!dev_is_dma_coherent(dev))
191204
arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
192205
else
193206
dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);

0 commit comments

Comments
 (0)