Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-01-17 12:00:35 +00:00)
While the GCC and Clang compilers already define __ASSEMBLER__ automatically when compiling assembler code, __ASSEMBLY__ is a macro that only gets defined by the Makefiles in the kernel.

This is bad since macros starting with two underscores are names that are reserved by the C language. It can also be very confusing for developers switching between userspace and kernelspace coding, or when dealing with uapi headers, which should rather use __ASSEMBLER__ instead. So let's standardize now on the __ASSEMBLER__ macro that is provided by the compilers.

This is an almost completely mechanical patch (done with a simple "sed -i" statement), apart from tweaking two comments manually in arch/powerpc/include/asm/bug.h and arch/powerpc/include/asm/kasan.h (which did not have the proper underscores at the end) and fixing a checkpatch error about spaces in arch/powerpc/include/asm/spu_csa.h.

Signed-off-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250801082007.32904-3-thuth@redhat.com
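As a rough illustration of the guard style this patch standardizes on (the header name and identifiers below are hypothetical, not taken from the patch), a header shared between C and assembly sources now tests the compiler-provided __ASSEMBLER__ macro instead of the Makefile-provided __ASSEMBLY__:

/* example.h - hypothetical header included from both .c and .S files */
#ifndef _EXAMPLE_H
#define _EXAMPLE_H

#define EXAMPLE_STACK_SIZE 0x4000 /* visible to C and assembly */

#ifndef __ASSEMBLER__ /* was: #ifndef __ASSEMBLY__ */
/* C-only declarations, hidden while preprocessing assembly sources */
struct example_state {
        unsigned long stack_base;
};
#endif /* __ASSEMBLER__ */

#endif /* _EXAMPLE_H */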
151 lines
2.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__

/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx)
#define L1_CACHE_SHIFT 4
#define MAX_COPY_PREFETCH 1
#define IFETCH_ALIGN_SHIFT 2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT 6
#define MAX_COPY_PREFETCH 4
#define IFETCH_ALIGN_SHIFT 3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH 4
#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT 7
#else
#define L1_CACHE_SHIFT 5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT 7
#define IFETCH_ALIGN_SHIFT 4 /* POWER8,9 */
#endif

#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

#define SMP_CACHE_BYTES L1_CACHE_BYTES

#define IFETCH_ALIGN_BYTES (1 << IFETCH_ALIGN_SHIFT)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif

#if !defined(__ASSEMBLER__)
#ifdef CONFIG_PPC64

struct ppc_cache_info {
        u32 size;
        u32 line_size;
        u32 block_size; /* L1 only */
        u32 log_block_size;
        u32 blocks_per_page;
        u32 sets;
        u32 assoc;
};

struct ppc64_caches {
        struct ppc_cache_info l1d;
        struct ppc_cache_info l1i;
        struct ppc_cache_info l2;
        struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;

static inline u32 l1_dcache_shift(void)
{
        return ppc64_caches.l1d.log_block_size;
}

static inline u32 l1_dcache_bytes(void)
{
        return ppc64_caches.l1d.block_size;
}

static inline u32 l1_icache_shift(void)
{
        return ppc64_caches.l1i.log_block_size;
}

static inline u32 l1_icache_bytes(void)
{
        return ppc64_caches.l1i.block_size;
}
#else
static inline u32 l1_dcache_shift(void)
{
        return L1_CACHE_SHIFT;
}

static inline u32 l1_dcache_bytes(void)
{
        return L1_CACHE_BYTES;
}

static inline u32 l1_icache_shift(void)
{
        return L1_CACHE_SHIFT;
}

static inline u32 l1_icache_bytes(void)
{
        return L1_CACHE_BYTES;
}
#endif

#define __read_mostly __section(".data..read_mostly")

#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR() 0L
#define _get_L3CR() 0L
#define _set_L2CR(val) do { } while(0)
#define _set_L3CR(val) do { } while(0)
#endif

static inline void dcbz(void *addr)
{
        __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

static inline void dcbi(void *addr)
{
        __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

static inline void dcbf(void *addr)
{
        __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

static inline void dcbst(void *addr)
{
        __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}

static inline void icbi(void *addr)
{
        asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
}

static inline void iccci(void *addr)
{
        asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
}

#endif /* !__ASSEMBLER__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
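As a usage note, here is a minimal sketch of how the helpers above could be combined, assuming kernel context where this header and mb() are available; flush_buffer_sketch() is a hypothetical function for illustration only, not part of this header (the kernel's real interface for this job is flush_dcache_range()). The idea is to walk the buffer in L1 cache-block steps, write each block back with dcbf(), and then order the flushes with a memory barrier:

#include <asm/cache.h>
#include <asm/barrier.h>

/* Hypothetical sketch: write a buffer back from the L1 data cache to memory. */
static inline void flush_buffer_sketch(void *start, unsigned long size)
{
        unsigned long bytes = l1_dcache_bytes();
        unsigned long addr = (unsigned long)start & ~(bytes - 1UL);
        unsigned long end = (unsigned long)start + size;

        for (; addr < end; addr += bytes)
                dcbf((void *)addr); /* flush one data cache block */
        mb(); /* order the flushes before any later accesses */
}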