Merge tag 'libcrypto-updates-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux
Pull crypto library updates from Eric Biggers:
"This is the main crypto library pull request for 6.17. The main focus
this cycle is on reorganizing the SHA-1 and SHA-2 code, providing
high-quality library APIs for SHA-1 and SHA-2 including HMAC support,
and establishing conventions for lib/crypto/ going forward:
- Migrate the SHA-1 and SHA-512 code (and also SHA-384 which shares
most of the SHA-512 code) into lib/crypto/. This includes both the
generic and architecture-optimized code. Greatly simplify how the
architecture-optimized code is integrated. Add an easy-to-use
library API for each SHA variant, including HMAC support. Finally,
reimplement the crypto_shash support on top of the library API.
- Apply the same reorganization to the SHA-256 code (and also SHA-224
which shares most of the SHA-256 code). This is a somewhat smaller
change, due to my earlier work on SHA-256. But this brings in all
the same additional improvements that I made for SHA-1 and SHA-512.
There are also some smaller changes:
- Move the architecture-optimized ChaCha, Poly1305, and BLAKE2s code
from arch/$(SRCARCH)/lib/crypto/ to lib/crypto/$(SRCARCH)/. For
these algorithms it's just a move, not a full reorganization yet.
- Fix the MIPS chacha-core.S to build with the clang assembler.
- Fix the Poly1305 functions to work in all contexts.
- Fix a performance regression in the x86_64 Poly1305 code.
- Clean up the x86_64 SHA-NI optimized SHA-1 assembly code.
Note that since the new organization of the SHA code is much simpler,
the diffstat of this pull request is negative, despite the addition of
new fully-documented library APIs for multiple SHA and HMAC-SHA
variants.
These APIs will allow further simplifications across the kernel as
users start using them instead of the old-school crypto API. (I've
already written a lot of such conversion patches, removing over 1000
more lines of code. But most of those will target 6.18 or later.)"
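To make that kind of conversion concrete, here is a minimal before/after sketch for a one-shot SHA-256 digest. digest_old() and digest_new() are illustrative names, not functions from the tree, and error handling is abbreviated. The "before" side uses the long-standing crypto_shash interface; the "after" side uses the sha256() library helper from <crypto/sha2.h>:

#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/err.h>

/* Before: one-shot digest through the dynamic crypto_shash API. */
static int digest_old(const u8 *data, unsigned int len,
		      u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_shash_tfm_digest(tfm, data, len, out);
	crypto_free_shash(tfm);
	return err;
}

/* After: a direct library call; no allocation and no failure paths. */
static void digest_new(const u8 *data, size_t len,
		       u8 out[SHA256_DIGEST_SIZE])
{
	sha256(data, len, out);
}

The library call is infallible and needs no transform object, which is why such conversions tend to delete code.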
* tag 'libcrypto-updates-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux: (67 commits)
lib/crypto: arm64/sha512-ce: Drop compatibility macros for older binutils
lib/crypto: x86/sha1-ni: Convert to use rounds macros
lib/crypto: x86/sha1-ni: Minor optimizations and cleanup
crypto: sha1 - Remove sha1_base.h
lib/crypto: x86/sha1: Migrate optimized code into library
lib/crypto: sparc/sha1: Migrate optimized code into library
lib/crypto: s390/sha1: Migrate optimized code into library
lib/crypto: powerpc/sha1: Migrate optimized code into library
lib/crypto: mips/sha1: Migrate optimized code into library
lib/crypto: arm64/sha1: Migrate optimized code into library
lib/crypto: arm/sha1: Migrate optimized code into library
crypto: sha1 - Use same state format as legacy drivers
crypto: sha1 - Wrap library and add HMAC support
lib/crypto: sha1: Add HMAC support
lib/crypto: sha1: Add SHA-1 library functions
lib/crypto: sha1: Rename sha1_init() to sha1_init_raw()
crypto: x86/sha1 - Rename conflicting symbol
lib/crypto: sha2: Add hmac_sha*_init_usingrawkey()
lib/crypto: arm/poly1305: Remove unneeded empty weak function
lib/crypto: x86/poly1305: Fix performance regression on short messages
...
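As an illustration of the new library API described in the first item of the message above, the following sketch hashes a buffer with SHA-512 three ways: one-shot, incrementally, and as a one-shot HMAC from a raw key. This is a sketch of the one-shot/incremental/HMAC pattern the pull message and shortlog describe; sha512_api_demo() is an illustrative name, and exact signatures should be checked against <crypto/sha2.h>:

#include <crypto/sha2.h>

static void sha512_api_demo(const u8 *msg, size_t msg_len,
			    const u8 *key, size_t key_len)
{
	struct sha512_ctx ctx;
	u8 digest[SHA512_DIGEST_SIZE];
	u8 mac[SHA512_DIGEST_SIZE];

	/* One-shot hash of a complete buffer. */
	sha512(msg, msg_len, digest);

	/* Incremental hashing, e.g. when the data arrives in pieces. */
	sha512_init(&ctx);
	sha512_update(&ctx, msg, msg_len);
	sha512_final(&ctx, digest);

	/*
	 * One-shot HMAC computed directly from a raw key, per the
	 * hmac_sha*_usingrawkey() helpers added this cycle (see the
	 * shortlog entry for hmac_sha*_init_usingrawkey()).
	 */
	hmac_sha512_usingrawkey(key, key_len, msg, msg_len, mac);
}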
Makefile (82 lines, 2.3 KiB)
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for ppc-specific library files..
#

CFLAGS_code-patching.o += -fno-stack-protector
CFLAGS_feature-fixups.o += -fno-stack-protector

CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)

KASAN_SANITIZE_code-patching.o := n
KASAN_SANITIZE_feature-fixups.o := n
# restart_table.o contains functions called in the NMI interrupt path
# which can be in real mode. Disable KASAN.
KASAN_SANITIZE_restart_table.o := n
KCSAN_SANITIZE_code-patching.o := n
KCSAN_SANITIZE_feature-fixups.o := n

ifdef CONFIG_KASAN
CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif

CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)

obj-y += code-patching.o feature-fixups.o pmem.o

obj-$(CONFIG_CODE_PATCHING_SELFTEST) += test-code-patching.o

ifndef CONFIG_KASAN
obj-y += string.o memcmp_$(BITS).o
obj-$(CONFIG_PPC32) += strlen_32.o
endif

obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o

obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o

# See corresponding test in arch/powerpc/Makefile
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
ifndef CONFIG_LD_IS_BFD
always-$(CONFIG_PPC64) += crtsavres.o
endif

obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
                               memcpy_power7.o restart_table.o

obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
           memcpy_64.o copy_mc_64.o

ifdef CONFIG_PPC_QUEUED_SPINLOCKS
obj-$(CONFIG_SMP) += qspinlock.o
else
obj64-$(CONFIG_SMP) += locks.o
endif

obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \
                                       test_emulate_step_exec_instr.o

obj-y += checksum_$(BITS).o checksum_wrappers.o \
         string_$(BITS).o

obj-y += sstep.o
obj-$(CONFIG_PPC_FPU) += ldstfp.o
obj64-y += quad.o

obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o

obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o

obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
# Enable <altivec.h>
CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)

obj-$(CONFIG_PPC64) += $(obj64-y)
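One design point worth noting in this Makefile: 64-bit-only objects are collected in the private obj64-y list rather than each being guarded with obj-$(CONFIG_PPC64), and the final line pulls the whole list into the build at once. Each entry then only has to state its own additional condition, as in the obj64-$(CONFIG_SMP) and obj64-$(CONFIG_ALTIVEC) lines above.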