Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2026-01-11 17:10:13 +00:00
Patch series "Add and use memdesc_flags_t".

At some point struct page will be separated from struct slab and struct folio. This is a step towards that by introducing a type for the 'flags' word of all three structures. This gives us a certain amount of type safety by establishing that some of these unsigned longs are different from other unsigned longs in that they contain things like node ID, section number and zone number in the upper bits. That lets us have functions that can be easily called by anyone who has a slab, folio or page (but not easily by anyone else) to get the node or zone.

There's going to be some unusual merge problems with this as some odd bits of the kernel decide they want to print out the flags value or something similar by writing page->flags and now they'll need to write page->flags.f instead. That's most of the churn here. Maybe we should be removing these things from the debug output?

This patch (of 11):

Wrap the unsigned long flags in a typedef. In upcoming patches, this will provide a strong hint that you can't just pass a random unsigned long to functions which take this as an argument.

[willy@infradead.org: s/flags/flags.f/ in several architectures]
  Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
[nicola.vetrini@gmail.com: mips: fix compilation error]
  Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/
  Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com
Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
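To make the "wrap the unsigned long in a typedef" idea concrete, here is a minimal standalone sketch. The struct page/struct folio bodies and the memdesc_raw() helper below are invented for illustration only; the kernel's real definitions differ in detail. The sketch only shows why the wrapper adds type safety and why call sites change from page->flags to page->flags.f.

	/* Standalone illustration -- not the kernel's actual definitions. */
	#include <stdio.h>

	typedef struct {
		unsigned long f;	/* node ID, section and zone bits live in the upper bits */
	} memdesc_flags_t;

	/* Toy stand-ins for the real structures. */
	struct page  { memdesc_flags_t flags; };
	struct folio { memdesc_flags_t flags; };

	/* Hypothetical helper: it only accepts the wrapped type, so a random
	 * bare unsigned long can no longer be passed in by accident. */
	static unsigned long memdesc_raw(memdesc_flags_t mdf)
	{
		return mdf.f;
	}

	int main(void)
	{
		struct folio folio = { .flags = { .f = 0x8UL } };

		/* Old code wrote folio->flags; with the typedef it becomes folio->flags.f. */
		printf("raw flags: %#lx\n", memdesc_raw(folio.flags));
		return 0;
	}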
137 lines · 3.9 KiB · C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#define flush_cache_vmap flush_cache_vmap
#endif /* CONFIG_PPC_BOOK3S_64 */

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
static inline void flush_dcache_folio(struct folio *folio)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_dcache_clean, &folio->flags.f))
		clear_bit(PG_dcache_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio

static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long addr, int len);
#define flush_icache_user_page flush_icache_user_page

void flush_dcache_icache_folio(struct folio *folio);

/**
 * flush_dcache_range(): Write any modified data cache blocks out to memory and
 * invalidate them. Does not invalidate the corresponding instruction cache
 * blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	if (IS_ENABLED(CONFIG_PPC64))
		mb();	/* sync */

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbf(addr);
	mb();	/* sync */

}

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);
	mb();	/* sync */
}

/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);
	mb();	/* sync */
}

#ifdef CONFIG_44x
static inline void flush_instruction_cache(void)
{
	iccci((void *)KERNELBASE);
	isync();
}
#else
void flush_instruction_cache(void);
#endif

#include <asm-generic/cacheflush.h>

#endif /* _ASM_POWERPC_CACHEFLUSH_H */
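For context, a hedged usage sketch of the routine documented above: flush_dcache_range() is what a caller reaches for when data the CPU has just written must actually reach memory before a non-cache-coherent device reads it. The caller below, its name, and the device hand-off are hypothetical and are not part of this header.

	/* Hypothetical caller, for illustration only -- not part of cacheflush.h. */
	static void example_publish_buffer(void *buf, unsigned long len)
	{
		unsigned long start = (unsigned long)buf;

		/* Write back and invalidate the D-cache lines covering buf,
		 * so a non-coherent device sees the CPU's stores. */
		flush_dcache_range(start, start + len);

		/* ... now point the device at the buffer ... */
	}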