mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git synced 2026-01-11 17:10:13 +00:00

Compare commits


35 Commits

Author SHA1 Message Date
Linus Torvalds
8c8081cc59 spi: Fixes for v6.19

Merge tag 'spi-fix-v6.19-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi fixes from Mark Brown:
 "A few small fixes for SPI that came in during the merge window,
  nothing too exciting here"

* tag 'spi-fix-v6.19-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi: microchip-core: Fix an error handling path in mchp_corespi_probe()
  spi: cadence-qspi: Fix runtime PM imbalance in probe
2025-12-11 09:57:08 +09:00
Linus Torvalds
31ca9ff64a regulator: Fixes for v6.19

Merge tag 'regulator-fix-v6.19-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator

Pull regulator fixes from Mark Brown:
 "A few fixes that came in during the merge window, nothing too
  exciting - the one core fix improves error propagation from gpiolib
  which hopefully shouldn't actually happen but is safer"

* tag 'regulator-fix-v6.19-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator:
  regulator: spacemit: Align input supply name with the DT binding
  regulator: fixed: Rely on the core freeing the enable GPIO
  regulator: check the return value of gpiod_set_value_cansleep()
2025-12-11 09:54:59 +09:00
Linus Torvalds
1de741159b slab fix for 6.19-rc1

Merge tag 'slab-for-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fix from Vlastimil Babka:

 - A stable fix for performance regression in tests that perform
   kmem_cache_destroy() a lot, due to unnecessarily wide scope of
   kvfree_rcu_barrier() (Harry Yoo)

* tag 'slab-for-6.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab: introduce kvfree_rcu_barrier_on_cache() for cache destruction
2025-12-11 08:54:08 +09:00
Linus Torvalds
0723a166d1 more s390 updates for 6.19 merge window

Merge tag 's390-6.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:

 - Use the MSI parent domain API instead of the legacy API for setup and
   teardown of PCI MSI IRQs

 - Select POSIX_CPU_TIMERS_TASK_WORK now that VIRT_XFER_TO_GUEST_WORK
   has been implemented for s390

 - Fix a KVM bug which can lead to guest memory corruption

 - Fix KASAN shadow memory mapping for hotplugged memory

 - Minor bug fixes and improvements

* tag 's390-6.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/bug: Add missing alignment
  s390/bug: Add missing CONFIG_BUG ifdef again
  KVM: s390: Fix gmap_helper_zap_one_page() again
  s390/pci: Migrate s390 IRQ logic to IRQ domain API
  genirq: Change hwirq parameter to irq_hw_number_t
  s390: Select POSIX_CPU_TIMERS_TASK_WORK
  s390: Unmap early KASAN shadow on memory offlining
  s390/vmem: Support 2G page splitting for KASAN shadow freeing
  s390/boot: Use entire page for PTEs
  s390/vmur: Use scnprintf() instead of sprintf()
2025-12-11 08:19:46 +09:00
Linus Torvalds
840b22edd5 dma-mapping fixes for Linux 6.19:

Merge tag 'dma-mapping-6.19-2025-12-10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:

 - last minute fix for missing parenthesis in recently merged code (Hans
   de Goede)

 - removal of excessive, non-fatal warnings (Dave Kleikamp)

* tag 'dma-mapping-6.19-2025-12-10' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
  dma-mapping: Fix DMA_BIT_MASK() macro being broken
  dma/pool: eliminate alloc_pages warning in atomic_pool_expand
2025-12-11 08:14:23 +09:00
Linus Torvalds
5c179cac05 alpha updates for v6.19

Merge tag 'alpha-for-v6.19-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/lindholm/alpha

Pull alpha updates from Magnus Lindholm:
 "Two small uapi fixes. One patch hardcodes TC* ioctl values that
  previously depended on the deprecated termio struct, avoiding build
  issues with newer glibc versions. The other patch switches uapi
  headers to use the compiler-defined __ASSEMBLER__ macro for better
  consistency between kernel and userspace.

    - don't reference obsolete termio struct for TC* constants

    - Replace __ASSEMBLY__ with __ASSEMBLER__ in the alpha headers"

* tag 'alpha-for-v6.19-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/lindholm/alpha:
  alpha: don't reference obsolete termio struct for TC* constants
  alpha: Replace __ASSEMBLY__ with __ASSEMBLER__ in the alpha headers
2025-12-11 08:01:52 +09:00
Linus Torvalds
29ba26af9a ARM development for 6.19-rc1
The last three were only merged today after testing was complete.

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux

Pull ARM updates from Russell King:

 - disable jump label and high PTE for PREEMPT RT kernels

 - fix input operand modification in load_unaligned_zeropad()

 - fix hash_name() / fault path induced warnings

 - fix branch predictor hardening

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rmk/linux:
  ARM: fix branch predictor hardening
  ARM: fix hash_name() fault
  ARM: allow __do_kernel_fault() to report execution of memory faults
  ARM: group is_permission_fault() with is_translation_fault()
  ARM: 9464/1: fix input-only operand modification in load_unaligned_zeropad()
  ARM: 9461/1: Disable HIGHPTE on PREEMPT_RT kernels
  ARM: 9459/1: Disable jump-label on PREEMPT_RT
2025-12-11 07:50:48 +09:00
Russell King (Oracle)
dd9143371a Merge branches 'fixes' and 'misc' into for-next 2025-12-10 12:22:37 +00:00
Russell King (Oracle)
fd2dee1c6e ARM: fix branch predictor hardening
__do_user_fault() may be called with an indeterminate interrupt enable
state, which means we may be preemptible at this point. This causes
problems when calling harden_branch_predictor(), for example when it is
called from a data abort via do_alignment_fault()->do_bad_area().

Move harden_branch_predictor() out of __do_user_fault() and into the
calling contexts.

By moving it into do_kernel_address_page_fault(), we can be sure that
interrupts are disabled there.

Converting do_translation_fault() to use do_kernel_address_page_fault()
rather than do_bad_area() means that we keep branch predictor handling
for translation faults. Interrupts will also be disabled at this call
site.

do_sect_fault() needs special handling, so detect user mode accesses
to kernel-addresses, and add an explicit call to branch predictor
hardening.

Finally, add branch predictor hardening to do_alignment() for the
faulting case (user mode accessing kernel addresses) before interrupts
are enabled.

This should cover all cases where harden_branch_predictor() is called,
ensuring that it always runs with interrupts disabled and that it is
called early in each call path.

Reviewed-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Tested-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2025-12-10 12:22:15 +00:00
Russell King (Oracle)
7733bc7d29 ARM: fix hash_name() fault
Zizhi Wo reports:

"During the execution of hash_name()->load_unaligned_zeropad(), a
 potential memory access beyond the PAGE boundary may occur. For
 example, when the filename length is near the PAGE_SIZE boundary.
 This triggers a page fault, which leads to a call to
 do_page_fault()->mmap_read_trylock(). If we can't acquire the lock,
 we have to fall back to the mmap_read_lock() path, which calls
 might_sleep(). This breaks RCU semantics because path lookup occurs
 under an RCU read-side critical section."

This is seen with CONFIG_DEBUG_ATOMIC_SLEEP=y and CONFIG_KFENCE=y.

Kernel addresses (with the exception of the vectors/kuser helper
page) do not have VMAs associated with them. If the vectors/kuser
helper page faults, then there are two possibilities:

1. if the fault happened while in kernel mode, then we're basically
   dead, because the CPU won't be able to vector through this page
   to handle the fault.
2. if the fault happened while in user mode, that means the page was
   protected from user access, and we want to fault anyway.

Thus, we can handle kernel addresses from any context entirely
separately without going anywhere near the mmap lock. This gives us
an entirely non-sleeping path for all kernel mode kernel address
faults.

As we handle the kernel address faults before interrupts are enabled,
this change has the side effect of improving the branch predictor
hardening, but does not completely solve the issue.

Reported-by: Zizhi Wo <wozizhi@huaweicloud.com>
Reported-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Link: https://lore.kernel.org/r/20251126090505.3057219-1-wozizhi@huaweicloud.com
Reviewed-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Tested-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2025-12-10 12:22:02 +00:00
Russell King (Oracle)
40b466db1d ARM: allow __do_kernel_fault() to report execution of memory faults
Allow __do_kernel_fault() to detect the execution of memory, so we can
provide the same fault message as do_page_fault() would do. This is
required when we split the kernel address fault handling from the
main do_page_fault() code path.

Reviewed-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Tested-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2025-12-10 12:21:46 +00:00
Russell King (Oracle)
dea20281ac ARM: group is_permission_fault() with is_translation_fault()
Group is_permission_fault() with is_translation_fault(), which is
needed to use is_permission_fault() in __do_kernel_fault(). As
this is static inline, there is no need for this to be under
CONFIG_MMU.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2025-12-09 09:19:10 +00:00
Liyuan Pang
edb924a721 ARM: 9464/1: fix input-only operand modification in load_unaligned_zeropad()
In the inline assembly inside load_unaligned_zeropad(), "addr" is
constrained as an input-only operand. The compiler assumes that on exit
from the asm statement these operands contain the same values as they
had before executing the statement, but when a kernel page fault
happens, the assembly fixup code "bic %2, %2, #0x3" modifies the value
of "addr", which may lead to unexpected behavior.

Use a temporary variable "tmp" to handle it, instead of modifying the
input-only operand, just like what arm64's load_unaligned_zeropad()
does.
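
The general rule, as a minimal sketch (illustrative, not the actual
kernel fixup code; "ret", "tmp" and "addr" are local variables):

        /* BROKEN: "addr" is an input-only operand ("r"), but the asm
         * clobbers the register backing it; the compiler may still
         * rely on the old value after the statement. */
        asm ("bic %1, %1, #0x3\n\t"
             "ldr %0, [%1]"
             : "=&r" (ret)
             : "r" (addr));

        /* OK: clobber a dedicated output temporary instead. */
        asm ("bic %1, %2, #0x3\n\t"
             "ldr %0, [%1]"
             : "=&r" (ret), "=&r" (tmp)
             : "r" (addr));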

Fixes: b9a50f74905a ("ARM: 7450/1: dcache: select DCACHE_WORD_ACCESS for little-endian ARMv6+ CPUs")
Co-developed-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Signed-off-by: Xie Yuanbin <xieyuanbin1@huawei.com>
Signed-off-by: Liyuan Pang <pangliyuan1@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2025-12-09 09:18:53 +00:00
Christophe JAILLET
8cef9b451d spi: microchip-core: Fix an error handling path in mchp_corespi_probe()
mchp_corespi_init() calls mchp_corespi_enable_ints(), so
mchp_corespi_disable_ints() should be called if an error occurs after
calling mchp_corespi_init(), as already done in the remove function.
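
A sketch of the resulting probe error path (simplified; the surrounding
calls and signatures are illustrative):

        mchp_corespi_init(host, spi);

        ret = devm_spi_register_controller(&pdev->dev, host);
        if (ret) {
                mchp_corespi_disable_ints(spi); /* undo mchp_corespi_init() */
                return ret;
        }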

Fixes: 059f545832be ("spi: add support for microchip "soft" spi controller")
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Link: https://patch.msgid.link/a7aaff1f28a83303a288de2914724a874fe1a11e.1764969247.git.christophe.jaillet@wanadoo.fr
Acked-by: Conor Dooley <conor.dooley@microchip.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-12-09 10:01:32 +09:00
Javier Martinez Canillas
99f0c3a654 regulator: spacemit: Align input supply name with the DT binding
The Device Tree binding schema for the SpacemiT P1 PMIC defines the main
input supply property as "vin-supply", but the driver defines the supply
name for BUCK and ALDO regulators as "vcc".

This causes the regulator core to look up a non-existent "vcc-supply".
Rename the supply from "vcc" to "vin" to match the DT binding and ensure
that the regulators' input supplies are correctly resolved.
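
The regulator core derives the DT property name from the descriptor's
supply name, so the fix boils down to (a sketch; other fields elided,
descriptor name illustrative):

        static const struct regulator_desc p1_buck_desc = {
                /* ... */
                .supply_name = "vin",  /* core then looks up "vin-supply" */
        };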

After this change, the regulator supply hierarchy is correctly reported:

  $ cat /sys/kernel/debug/regulator/regulator_summary
   regulator                      use open bypass opmode voltage current     min     max
  ---------------------------------------------------------------------------------------
   regulator-dummy                  1    0      0 unknown     0mV     0mA     0mV     0mV
   dc_in_12v                        2    1      0 unknown 12000mV     0mA 12000mV 12000mV
      vcc_4v                        7   10      0 unknown  4000mV     0mA  4000mV  4000mV
         buck1                      1    0      0 unknown  1050mV     0mA   500mV  3425mV
         buck2                      1    0      0 unknown   900mV     0mA   500mV  3425mV
         buck3                      1    0      0 unknown  1800mV     0mA   500mV  1800mV
         buck4                      1    0      0 unknown  3300mV     0mA   500mV  3300mV
         buck5                      3    7      0 unknown  2100mV     0mA   500mV  3425mV
            dldo1                   0    0      0 unknown  1200mV     0mA   500mV  3125mV
            dldo2                   0    0      0 unknown   500mV     0mA   500mV  3125mV
            dldo3                   0    0      0 unknown   500mV     0mA   500mV  3125mV
            dldo4                   1    0      0 unknown  1800mV     0mA   500mV  3125mV
            dldo5                   0    0      0 unknown   500mV     0mA   500mV  3125mV
            dldo6                   1    0      0 unknown  1800mV     0mA   500mV  3125mV
            dldo7                   0    0      0 unknown   500mV     0mA   500mV  3125mV
         buck6                      1    0      0 unknown  1100mV     0mA   500mV  3425mV
         aldo1                      0    0      0 unknown  1800mV     0mA   500mV  3125mV
         aldo2                      0    0      0 unknown   500mV     0mA   500mV  3125mV
         aldo3                      0    0      0 unknown   500mV     0mA   500mV  3125mV
         aldo4                      0    0      0 unknown   500mV     0mA   500mV  3125mV

Fixes: 8b84d712ad84 ("regulator: spacemit: support SpacemiT P1 regulators")
Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
Link: https://patch.msgid.link/20251206133852.1739475-1-javierm@redhat.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-12-09 10:01:25 +09:00
Sam James
9aeed90419 alpha: don't reference obsolete termio struct for TC* constants
Similar in nature to ab107276607af90b13a5994997e19b7b9731e251. glibc-2.42
drops the legacy termio struct, but the ioctls.h header still defines some
TC* constants in terms of termio (via sizeof). Hardcode the values instead.
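
For reference, the hardcoded numbers are what the _IOR()/_IOW() macros
used to expand to, assuming the usual alpha ioctl layout (direction in
bits 31-29, 13-bit size, 8-bit type, 8-bit nr) and
sizeof(struct termio) == 18 (0x12). Worked example:

        TCGETA = _IOR('t', 23, struct termio)
               = (_IOC_READ << 29) | (0x12 << 16) | ('t' << 8) | 23
               = 0x40000000 | 0x120000 | 0x7400 | 0x17
               = 0x40127417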

This fixes building Python, for example, which falls over like:
  ./Modules/termios.c:1119:16: error: invalid application of 'sizeof' to incomplete type 'struct termio'

Link: https://bugs.gentoo.org/961769
Link: https://bugs.gentoo.org/962600
Signed-off-by: Sam James <sam@gentoo.org>
Reviewed-by: Magnus Lindholm <linmag7@gmail.com>
Link: https://lore.kernel.org/r/6ebd3451908785cad53b50ca6bc46cfe9d6bc03c.1764922497.git.sam@gentoo.org
Signed-off-by: Magnus Lindholm <linmag7@gmail.com>
2025-12-08 23:10:54 +01:00
Thomas Huth
3cec82b4fc alpha: Replace __ASSEMBLY__ with __ASSEMBLER__ in the alpha headers
While the GCC and Clang compilers already define __ASSEMBLER__
automatically when compiling assembly code, __ASSEMBLY__ is a
macro that only gets defined by the Makefiles in the kernel.
This can be very confusing when switching between userspace
and kernelspace coding, or when dealing with uapi headers that
rather should use __ASSEMBLER__ instead. So let's standardize now
on the __ASSEMBLER__ macro that is provided by the compilers.

This is a completely mechanical patch (done with a simple "sed -i"
statement).

Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: linux-alpha@vger.kernel.org
Signed-off-by: Thomas Huth <thuth@redhat.com>
Reviewed-by: Magnus Lindholm <linmag7@gmail.com>
Link: https://lore.kernel.org/r/20251121100044.282684-2-thuth@redhat.com
Signed-off-by: Magnus Lindholm <linmag7@gmail.com>
2025-12-08 23:10:30 +01:00
Heiko Carstens
70075e3d0c s390/bug: Add missing alignment
All objects are supposed to have a minimum alignment of two, since a
couple of instructions only work with even addresses. Add the missing
align statement for the file string.

Fixes: 6584ff203aec ("bugs/s390: Use 'cond_str' in __EMIT_BUG()")
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-08 15:42:41 +01:00
Heiko Carstens
1a82d430c5 s390/bug: Add missing CONFIG_BUG ifdef again
Fall back to the generic BUG implementation in case CONFIG_BUG is
disabled. This restores the old behaviour from before 'cond_str' support
was added.

It probably doesn't matter, since nobody should disable CONFIG_BUG, but
at least this is consistent with the previous behaviour.

Fixes: 6584ff203aec ("bugs/s390: Use 'cond_str' in __EMIT_BUG()")
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-08 15:42:36 +01:00
Claudio Imbrenda
2f393c228c KVM: s390: Fix gmap_helper_zap_one_page() again
A few checks were missing in gmap_helper_zap_one_page(), which can lead
to memory corruption in the guest under specific circumstances.

Add the missing checks.

Fixes: 5deafa27d9ae ("KVM: s390: Fix to clear PTE when discarding a swapped page")
Cc: stable@vger.kernel.org
Reported-by: Marc Hartmayer <mhartmay@linux.ibm.com>
Tested-by: Marc Hartmayer <mhartmay@linux.ibm.com>
Acked-by: Christian Borntraeger <borntraeger@linux.ibm.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-08 13:06:14 +01:00
Hans de Goede
31b931bebd dma-mapping: Fix DMA_BIT_MASK() macro being broken
After commit a50f7456f853 ("dma-mapping: Allow use of DMA_BIT_MASK(64) in
global scope"), the DMA_BIT_MASK() macro is broken when passed non-trivial
expressions for the value of 'n'. This is caused by the new version missing
parentheses around 'n' when evaluating it.

One example of this breakage is the IPU6 driver now crashing due to
it getting DMA-addresses with address bit 32 set even though it has
tried to set a 32 bit DMA mask.

The IPU6 CSI2 engine has a DMA mask of either 31 or 32 bits depending
on whether it is in secure mode, and it sets this mask like this:

        mmu_info->aperture_end =
                (dma_addr_t)DMA_BIT_MASK(isp->secure_mode ?
                                         IPU6_MMU_ADDR_BITS :
                                         IPU6_MMU_ADDR_BITS_NON_SECURE);

So the 'n' argument here is "isp->secure_mode ? IPU6_MMU_ADDR_BITS :
IPU6_MMU_ADDR_BITS_NON_SECURE", which gets expanded into:

isp->secure_mode ? IPU6_MMU_ADDR_BITS : IPU6_MMU_ADDR_BITS_NON_SECURE - 1

with the "- 1" only being applied in the non-secure case, making the
secure-mode mask one bit too large.
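
The shape of the bug and the fix, as a sketch (the real definition also
special-cases n == 64 for global-scope use):

        /* broken: 'n' substituted without parentheses, so the "- 1"
         * binds into the caller's ?: expression as shown above */
        #define DMA_BIT_MASK(n) ((2ULL << (n - 1)) - 1)

        /* fixed: parenthesize every use of 'n' */
        #define DMA_BIT_MASK(n) ((2ULL << ((n) - 1)) - 1)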

Fixes: a50f7456f853 ("dma-mapping: Allow use of DMA_BIT_MASK(64) in global scope")
Cc: Sakari Ailus <sakari.ailus@linux.intel.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: stable@vger.kernel.org
Signed-off-by: Hans de Goede <johannes.goede@oss.qualcomm.com>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251207184756.97904-1-johannes.goede@oss.qualcomm.com
2025-12-08 09:40:57 +01:00
Dave Kleikamp
463d439bec dma/pool: eliminate alloc_pages warning in atomic_pool_expand
atomic_pool_expand() iteratively retries the allocation while
decrementing the page order, so there is no need to issue a warning when
an individual attempt fails.
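
The usual way to suppress the warning for such speculative attempts is
to mask in __GFP_NOWARN (a sketch; the actual patch may differ in
detail):

        do {
                page = alloc_pages(gfp | __GFP_NOWARN, order);
        } while (!page && order-- > 0);  /* fall back to smaller orders */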

Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Fixes: d7e673ec2c8e ("dma-pool: Only allocate from CMA when in same memory zone")
[mszyprow: fixed typo]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251202152810.142370-1-dave.kleikamp@oracle.com
2025-12-08 09:40:57 +01:00
Harry Yoo
0f35040de5 mm/slab: introduce kvfree_rcu_barrier_on_cache() for cache destruction
Currently, kvfree_rcu_barrier() flushes RCU sheaves across all slab
caches when a cache is destroyed. This is unnecessary; only the RCU
sheaves belonging to the cache being destroyed need to be flushed.

As suggested by Vlastimil Babka, introduce a weaker form of
kvfree_rcu_barrier() that operates on a specific slab cache.

Factor out flush_rcu_sheaves_on_cache() from flush_all_rcu_sheaves() and
call it from flush_all_rcu_sheaves() and kvfree_rcu_barrier_on_cache().

Call kvfree_rcu_barrier_on_cache() instead of kvfree_rcu_barrier() on
cache destruction.
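
The shape of the refactoring, as a sketch (function names are taken from
this message, bodies elided):

        static void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
        {
                /* flush only the RCU sheaves belonging to @s */
        }

        void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
        {
                flush_rcu_sheaves_on_cache(s);
                /* ... wait for pending kvfree_rcu() on @s ... */
        }

        /* cache destruction now calls the _on_cache() variant instead
         * of the global kvfree_rcu_barrier() */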

The performance benefit was evaluated on a 12-core/24-thread AMD Ryzen
5900X machine (1 socket) by loading the slub_kunit module.

Before:
  Total calls: 19
  Average latency (us): 18127
  Total time (us): 344414

After:
  Total calls: 19
  Average latency (us): 10066
  Total time (us): 191264

Two performance regressions have been reported:
  - stress module loader test's runtime increases by 50-60% (Daniel)
  - internal graphics test's runtime on Tegra234 increases by 35% (Jon)

They are fixed by this change.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Fixes: ec66e0d59952 ("slab: add sheaf support for batching kfree_rcu() operations")
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/linux-mm/1bda09da-93be-4737-aef0-d47f8c5c9301@suse.cz
Reported-and-tested-by: Daniel Gomez <da.gomez@samsung.com>
Closes: https://lore.kernel.org/linux-mm/0406562e-2066-4cf8-9902-b2b0616dd742@kernel.org
Reported-and-tested-by: Jon Hunter <jonathanh@nvidia.com>
Closes: https://lore.kernel.org/linux-mm/e988eff6-1287-425e-a06c-805af5bbf262@nvidia.com
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20251207154148.117723-1-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
2025-12-07 18:09:54 +01:00
Tobias Schumacher
f770950a47 s390/pci: Migrate s390 IRQ logic to IRQ domain API
s390 is one of the last architectures using the legacy API for setup and
teardown of PCI MSI IRQs. Migrate the s390 IRQ allocation and teardown
to the MSI parent domain API. For details, see:

https://lore.kernel.org/lkml/20221111120501.026511281@linutronix.de

In detail, create an MSI parent domain for each PCI domain. When a PCI
device sets up MSI or MSI-X IRQs, the library creates a per-device IRQ
domain for this device, which is used by the device for allocating and
freeing IRQs.

The per-device domain delegates this allocation and freeing to the
parent-domain. In the end, the corresponding callbacks of the parent
domain are responsible for allocating and freeing the IRQs.

The allocation is split into two parts:
- zpci_msi_prepare() is called once for each device and allocates the
  required resources. On s390, each PCI function has its own airq
  vector and a summary bit, which must be configured once per function.
  This is done in prepare().
- zpci_msi_alloc() can be called multiple times for allocating one or
  more MSI/MSI-X IRQs. This creates a mapping between the virtual IRQ
  number in the kernel and the hardware IRQ number.

Freeing is split into two counterparts:
- zpci_msi_free() reverts the effects of zpci_msi_alloc() and
- zpci_msi_teardown() reverts the effects of zpci_msi_prepare(). This is
  called once when all IRQs are freed before a device is removed.

Since the parent domain in the end allocates the IRQs, the hwirq
encoding must be unambiguous for all IRQs of all devices. This is
achieved by encoding the hwirq using the devfn and the MSI index.

Reviewed-by: Niklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: Farhan Ali <alifm@linux.ibm.com>
Signed-off-by: Tobias Schumacher <ts@linux.ibm.com>
Reviewed-by: Gerd Bayer <gbayer@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-07 16:15:23 +01:00
Tobias Schumacher
455a65260f genirq: Change hwirq parameter to irq_hw_number_t
The irqdomain implementation internally represents hardware IRQs as
irq_hw_number_t, which is defined as unsigned long. When providing an
irq_hw_number_t to the generic_handle_domain() functions, which expect
an unsigned int hwirq, this can lead to a loss of information. Change
the hwirq parameter to irq_hw_number_t to support the full range of
hwirqs.
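
The truncation risk, sketched:

        irq_hw_number_t hwirq = 0x100000000UL;  /* needs 33 bits */

        /* with "unsigned int hwirq" in the prototype this argument
         * was silently truncated to 0 on 64-bit kernels */
        generic_handle_domain_irq(domain, hwirq);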

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Niklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: Farhan Ali <alifm@linux.ibm.com>
Signed-off-by: Tobias Schumacher <ts@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-07 16:15:22 +01:00
Heiko Carstens
eb9780a1a3 s390: Select POSIX_CPU_TIMERS_TASK_WORK
Now that support for VIRT_XFER_TO_GUEST_WORK is available for s390, it
is possible to also select HAVE_POSIX_CPU_TIMERS_TASK_WORK. See [1] for
the reasons why it makes sense, also for architectures which do not
support PREEMPT_RT.
PREEMPT_RT.

[1] https://lore.kernel.org/all/20200716201923.228696399@linutronix.de

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-07 16:15:19 +01:00
Vasily Gorbik
8543ecc0e0 s390: Unmap early KASAN shadow on memory offlining
Teach the memory hotplug path to tear down KASAN shadow that
was mapped during early boot when a memory block is offlined.

Track for each sclp_mem whether its range was covered by the early
KASAN shadow via an early_shadow_mapped flag. When such a block is
deconfigured and removed via sclp_config_mem_store(), compute the
corresponding shadow range and call vmemmap_free() to unmap the
boot mapped shadow, then clear the flag.

Using vmemmap_free() for the early shadow is safe despite the use
of large mappings in the boot-time KASAN setup. The initial shadow
is mapped with 1M and 2G pages, where possible. The minimum hotplug
memory block size is 128M and always aligned (the identity mapping
is at least 2G aligned), which corresponds to a 16M chunk of at
least 1M aligned shadow. PMD-mapped 1M shadow pages therefore
never need splitting, and PUD-mapped 2G shadow pages can now be
split following the preceding changes.

Relax the modify_pagetable() sanity check in vmem so that, with
KASAN enabled, it may also operate on the KASAN shadow region in
addition to the 1:1 mapping and vmemmap area. This allows the KASAN
shadow unmapping to reuse the common vmem helpers.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-07 16:15:19 +01:00
Vasily Gorbik
6a35d02fec s390/vmem: Support 2G page splitting for KASAN shadow freeing
Export split_pud_page() so it can be used from the vmem code and teach
modify_pud_table() to split PUD-sized mappings when only a subrange
needs to be removed.

If the range to be removed covers a full PUD-sized mapping, keep the
existing behavior: clear the PUD entry and free the backing large page
(for non-direct mappings). Otherwise, split the PUD-mapped page into
PMD mappings and let the walker handle the smaller ranges.

This is needed for KASAN early shadow removal support: memory hotplug
freeing the KASAN early shadow is the only expected caller that will
try to free 2G PUD-mapped regions of non-direct mappings.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-07 16:15:19 +01:00
Vasily Gorbik
1442bb87b8 s390/boot: Use entire page for PTEs
Make boot_pte_alloc() always allocate a full PAGE_SIZE page for
PTE tables, instead of carving two 2K PTE tables out of a single
4K page, similar to commit daa8af80d283 ("s390/mm: Allocate page
table with PAGE_SIZE granularity").

This mirrors the change in the vmem code and ensures that boot page
tables backing the early KASAN shadow can later be fully freed by
the vmem page-table teardown helpers (e.g. when unmapping early
KASAN shadow on memory hotplug).

The leftover-based allocation was originally added to reduce physmem
allocator fragmentation when EDAT was disabled. On current hardware
EDAT1 is available on all production systems, so the complexity is no
longer justified and gets in the way of freeing the shadow mappings.

Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-07 16:15:19 +01:00
Heiko Carstens
d9f5917801 s390/vmur: Use scnprintf() instead of sprintf()
Use scnprintf() instead of sprintf() for those cases where the destination
is an array and the size of the array is known at compile time.

This prevents theoretical buffer overflows, and also avoids people
repeatedly spending time figuring out whether the code is actually safe.
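
The pattern, for illustration (the buffer and format are made up):

        char buf[16];
        int len;

        /* never writes more than sizeof(buf) bytes, including the NUL,
         * and returns the number of characters actually written */
        len = scnprintf(buf, sizeof(buf), "%04x/%04x", devno, devtype);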

Reviewed-by: Jan Polensky <japo@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
2025-12-07 16:15:18 +01:00
Mark Brown
79a45ddcdb regulator: fixed: Rely on the core freeing the enable GPIO
In order to simplify ownership rules for enable GPIOs supplied by
drivers, regulator_register() always takes ownership of them, even if it
ends up failing for some other reason. We therefore should not free the
GPIO if registration fails but should just let the core worry about it.

Fixes: 636f4618b1cd ("regulator: fixed: fix GPIO descriptor leak on register failure")
Reported-by: Diederik de Haas <diederik@cknow-tech.com>
Closes: https://lore.kernel.org/r/DEPEYUF5BRGY.UKFBWRRE8HNP@cknow-tech.com
Tested-by: Diederik de Haas <diederik@cknow-tech.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://patch.msgid.link/20251204-regulator-fixed-fix-gpiod-leak-v1-1-48efea5b82c2@kernel.org
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-12-05 14:23:48 +00:00
Bartosz Golaszewski
84c8097e67 regulator: check the return value of gpiod_set_value_cansleep()
gpiod_set_value_cansleep() now returns an integer and can indicate
failures in the GPIO layer. Propagate any potential errors to the
regulator core.
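
The resulting pattern in the regulator GPIO helpers (a sketch):

        ret = gpiod_set_value_cansleep(gpiod, 1);
        if (ret)
                return ret;  /* propagate GPIO layer failures */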

Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
Link: https://patch.msgid.link/20251203084737.15891-1-bartosz.golaszewski@oss.qualcomm.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-12-03 12:38:21 +00:00
Ali Tariq
e1f2e77624 spi: cadence-qspi: Fix runtime PM imbalance in probe
The probe function incorrectly calls pm_runtime_put_autosuspend()
twice in succession at the end of successful probe, dropping two
runtime PM references while only one was acquired earlier with
pm_runtime_get_sync(). This causes a usage count underflow:

    cadence-qspi 13010000.spi: Runtime PM usage count underflow!

Remove the first redundant pm_runtime_put_autosuspend() call to
balance the reference count.

Tested on StarFive VisionFive 2 v1.2A board.
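
The balanced pattern at the end of a successful probe (sketch):

        pm_runtime_get_sync(dev);        /* one reference taken   */
        /* ... controller setup ... */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev); /* one reference dropped */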

Fixes: 30dbc1c8d50f ("spi: cadence-qspi: defer runtime support on socfpga if reset bit is enabled")

Signed-off-by: Ali Tariq <alitariq45892@gmail.com>
Link: https://patch.msgid.link/20251130091251.12120-1-alitariq45892@gmail.com
Signed-off-by: Mark Brown <broonie@kernel.org>
2025-12-02 13:37:55 +00:00
Sebastian Andrzej Siewior
fedadc4137 ARM: 9461/1: Disable HIGHPTE on PREEMPT_RT kernels
gup_pgd_range() is invoked with interrupts disabled and ends up invoking
__kmap_local_page_prot() via gup_p4d_range() and pte_offset_map().
With HIGHPTE enabled, __kmap_local_page_prot() invokes kmap_high_get(),
which uses a spinlock_t via lock_kmap_any(). This leads to a
sleeping-while-atomic error on PREEMPT_RT, because spinlock_t becomes a
sleeping lock there and must not be acquired in atomic context.

The loop in map_new_virtual() uses a wait_queue_head_t for wakeups,
which also relies on a spinlock_t.

Since HIGHPTE is rarely needed at all, turn it off for PREEMPT_RT
to allow the use of get_user_pages_fast().

[arnd: rework patch to turn off HIGHPTE instead of HAVE_FAST_GUP]

Co-developed-by: Arnd Bergmann <arnd@arndb.de>

Acked-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2025-12-02 11:47:53 +00:00
Thomas Gleixner
256d97d358 ARM: 9459/1: Disable jump-label on PREEMPT_RT
jump-labels are used to efficiently switch between two possible code
paths. To achieve this, stop_machine() is used to keep the CPU in a
known state while the opcode is modified. The usage of stop_machine()
here leads to large latency spikes which can be observed on PREEMPT_RT.
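
For reference, the mechanism being disabled here is the static-key API
('my_feature' is illustrative):

        DEFINE_STATIC_KEY_FALSE(my_feature);

        if (static_branch_unlikely(&my_feature))
                do_feature_work();

        /* re-patching the branch goes through stop_machine() on 32-bit ARM */
        static_branch_enable(&my_feature);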

Jump labels may change their target at runtime and are not restricted
to the debug or "configuration/setup" parts of a PREEMPT_RT system,
where high latencies could be defined as acceptable.

On 64-bit Arm, it is possible to use jump labels without the
stop_machine() call, since the architecture provides a way to atomically
change one 32-bit instruction word while maintaining consistency, but
this is not generally the case on 32-bit, in particular in thumb2 mode.
Disable jump-label support on a PREEMPT_RT system when SMP is enabled.

[bigeasy: Patch description.]
[arnd: add !SMP case, extend changelog]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2025-12-02 11:44:55 +00:00
34 changed files with 529 additions and 276 deletions

View File

@@ -4,7 +4,7 @@
 #include <uapi/asm/console.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 extern long callback_puts(long unit, const char *s, long length);
 extern long callback_getc(long unit);
 extern long callback_open_console(void);
@@ -26,5 +26,5 @@ struct crb_struct;
 struct hwrpb_struct;
 extern int callback_init_done;
 extern void * callback_init(void *);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 #endif /* __AXP_CONSOLE_H */

View File

@@ -6,7 +6,7 @@
 #include <asm/pal.h>
 #include <vdso/page.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 #define STRICT_MM_TYPECHECKS
@@ -74,7 +74,7 @@ typedef struct page *pgtable_t;
 #define PAGE_OFFSET 0xfffffc0000000000
 #endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
 #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))

View File

@@ -4,7 +4,7 @@
 #include <uapi/asm/pal.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 extern void halt(void) __attribute__((noreturn));
 #define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
@@ -183,5 +183,5 @@ qemu_get_vmtime(void)
         return v0;
 }
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
 #endif /* __ALPHA_PAL_H */

View File

@@ -4,14 +4,14 @@
 #ifdef __KERNEL__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 #include <asm/processor.h>
 #include <asm/types.h>
 #include <asm/hwrpb.h>
 #include <asm/sysinfo.h>
 #endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 struct thread_info {
         struct pcb_struct pcb; /* palcode state */
@@ -44,7 +44,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
 register unsigned long *current_stack_pointer __asm__ ("$30");
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
 /* Thread information allocation. */
 #define THREAD_SIZE_ORDER 1
@@ -110,7 +110,7 @@ register unsigned long *current_stack_pointer __asm__ ("$30");
         put_user(res, (int __user *)(value)); \
 })
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
 extern void __save_fpu(void);
 static inline void save_fpu(void)

View File

@@ -23,10 +23,10 @@
 #define TCSETSW _IOW('t', 21, struct termios)
 #define TCSETSF _IOW('t', 22, struct termios)
-#define TCGETA _IOR('t', 23, struct termio)
-#define TCSETA _IOW('t', 24, struct termio)
-#define TCSETAW _IOW('t', 25, struct termio)
-#define TCSETAF _IOW('t', 28, struct termio)
+#define TCGETA 0x40127417
+#define TCSETA 0x80127418
+#define TCSETAW 0x80127419
+#define TCSETAF 0x8012741c
 #define TCSBRK _IO('t', 29)
 #define TCXONC _IO('t', 30)

View File

@@ -82,7 +82,7 @@ config ARM
         select HAS_IOPORT
         select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
         select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && (!PREEMPT_RT || !SMP)
         select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
         select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
         select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
@@ -1213,7 +1213,7 @@ config HIGHMEM
 config HIGHPTE
         bool "Allocate 2nd-level pagetables from highmem" if EXPERT
-        depends on HIGHMEM
+        depends on HIGHMEM && !PREEMPT_RT
         default y
         help
           The VM uses one page of physical memory for each page table.

View File

@@ -67,7 +67,7 @@ static inline unsigned long find_zero(unsigned long mask)
  */
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-        unsigned long ret, offset;
+        unsigned long ret, tmp;
         /* Load word from unaligned pointer addr */
         asm(
@@ -75,9 +75,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
         "2:\n"
         " .pushsection .text.fixup,\"ax\"\n"
         " .align 2\n"
-        "3: and %1, %2, #0x3\n"
-        " bic %2, %2, #0x3\n"
-        " ldr %0, [%2]\n"
+        "3: bic %1, %2, #0x3\n"
+        " ldr %0, [%1]\n"
+        " and %1, %2, #0x3\n"
         " lsl %1, %1, #0x3\n"
 #ifndef __ARMEB__
         " lsr %0, %0, %1\n"
@@ -90,7 +90,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
         " .align 3\n"
         " .long 1b, 3b\n"
         " .popsection"
-        : "=&r" (ret), "=&r" (offset)
+        : "=&r" (ret), "=&r" (tmp)
         : "r" (addr), "Qo" (*(unsigned long *)addr));
         return ret;

View File

@@ -19,10 +19,11 @@
 #include <linux/init.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
+#include <linux/unaligned.h>
 #include <asm/cp15.h>
 #include <asm/system_info.h>
-#include <linux/unaligned.h>
+#include <asm/system_misc.h>
 #include <asm/opcodes.h>
 #include "fault.h"
@@ -809,6 +810,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         int thumb2_32b = 0;
         int fault;
+        if (addr >= TASK_SIZE && user_mode(regs))
+                harden_branch_predictor();
         if (interrupts_enabled(regs))
                 local_irq_enable();

View File

@@ -128,6 +128,19 @@ static inline bool is_translation_fault(unsigned int fsr)
         return false;
 }
+static inline bool is_permission_fault(unsigned int fsr)
+{
+        int fs = fsr_fs(fsr);
+#ifdef CONFIG_ARM_LPAE
+        if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
+                return true;
+#else
+        if (fs == FS_L1_PERM || fs == FS_L2_PERM)
+                return true;
+#endif
+        return false;
+}
 static void die_kernel_fault(const char *msg, struct mm_struct *mm,
                              unsigned long addr, unsigned int fsr,
                              struct pt_regs *regs)
@@ -162,6 +175,8 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
          */
         if (addr < PAGE_SIZE) {
                 msg = "NULL pointer dereference";
+        } else if (is_permission_fault(fsr) && fsr & FSR_LNX_PF) {
+                msg = "execution of memory";
         } else {
                 if (is_translation_fault(fsr) &&
                     kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
@@ -183,9 +198,6 @@ __do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
 {
         struct task_struct *tsk = current;
-        if (addr > TASK_SIZE)
-                harden_branch_predictor();
 #ifdef CONFIG_DEBUG_USER
         if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
             ((user_debug & UDBG_BUS) && (sig == SIGBUS))) {
@@ -225,19 +237,6 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 }
 #ifdef CONFIG_MMU
-static inline bool is_permission_fault(unsigned int fsr)
-{
-        int fs = fsr_fs(fsr);
-#ifdef CONFIG_ARM_LPAE
-        if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
-                return true;
-#else
-        if (fs == FS_L1_PERM || fs == FS_L2_PERM)
-                return true;
-#endif
-        return false;
-}
 #ifdef CONFIG_CPU_TTBR0_PAN
 static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
 {
@@ -259,6 +258,37 @@ static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs)
 }
 #endif
+static int __kprobes
+do_kernel_address_page_fault(struct mm_struct *mm, unsigned long addr,
+                             unsigned int fsr, struct pt_regs *regs)
+{
+        if (user_mode(regs)) {
+                /*
+                 * Fault from user mode for a kernel space address. User mode
+                 * should not be faulting in kernel space, which includes the
+                 * vector/khelper page. Handle the branch predictor hardening
+                 * while interrupts are still disabled, then send a SIGSEGV.
+                 */
+                harden_branch_predictor();
+                __do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
+        } else {
+                /*
+                 * Fault from kernel mode. Enable interrupts if they were
+                 * enabled in the parent context. Section (upper page table)
+                 * translation faults are handled via do_translation_fault(),
+                 * so we will only get here for a non-present kernel space
+                 * PTE or PTE permission fault. This may happen in exceptional
+                 * circumstances and need the fixup tables to be walked.
+                 */
+                if (interrupts_enabled(regs))
+                        local_irq_enable();
+                __do_kernel_fault(mm, addr, fsr, regs);
+        }
+        return 0;
+}
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
@@ -272,6 +302,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         if (kprobe_page_fault(regs, fsr))
                 return 0;
+        /*
+         * Handle kernel addresses faults separately, which avoids touching
+         * the mmap lock from contexts that are not able to sleep.
+         */
+        if (addr >= TASK_SIZE)
+                return do_kernel_address_page_fault(mm, addr, fsr, regs);
         /* Enable interrupts if they were enabled in the parent context. */
         if (interrupts_enabled(regs))
@@ -448,16 +484,20 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  * We enter here because the first level page table doesn't contain
  * a valid entry for the address.
  *
- * If the address is in kernel space (>= TASK_SIZE), then we are
- * probably faulting in the vmalloc() area.
+ * If this is a user address (addr < TASK_SIZE), we handle this as a
+ * normal page fault. This leaves the remainder of the function to handle
+ * kernel address translation faults.
  *
- * If the init_task's first level page tables contains the relevant
- * entry, we copy the it to this task. If not, we send the process
- * a signal, fixup the exception, or oops the kernel.
+ * Since user mode is not permitted to access kernel addresses, pass these
+ * directly to do_kernel_address_page_fault() to handle.
  *
- * NOTE! We MUST NOT take any locks for this case. We may be in an
- * interrupt or a critical region, and should only copy the information
- * from the master page table, nothing more.
+ * Otherwise, we're probably faulting in the vmalloc() area, so try to fix
+ * that up. Note that we must not take any locks or enable interrupts in
+ * this case.
+ *
+ * If vmalloc() fixup fails, that means the non-leaf page tables did not
+ * contain an entry for this address, so handle this via
+ * do_kernel_address_page_fault().
  */
 #ifdef CONFIG_MMU
 static int __kprobes
@@ -523,7 +563,8 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
         return 0;
 bad_area:
-        do_bad_area(addr, fsr, regs);
+        do_kernel_address_page_fault(current->mm, addr, fsr, regs);
         return 0;
 }
 #else /* CONFIG_MMU */
@@ -543,7 +584,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+        /*
+         * If this is a kernel address, but from user mode, then userspace
+         * is trying bad stuff. Invoke the branch predictor handling.
+         * Interrupts are disabled here.
+         */
+        if (addr >= TASK_SIZE && user_mode(regs))
+                harden_branch_predictor();
         do_bad_area(addr, fsr, regs);
         return 0;
 }
 #endif /* CONFIG_ARM_LPAE */

View File

@@ -238,6 +238,7 @@ config S390
         select HAVE_PERF_EVENTS
         select HAVE_PERF_REGS
         select HAVE_PERF_USER_STACK_DUMP
+        select HAVE_POSIX_CPU_TIMERS_TASK_WORK
         select HAVE_PREEMPT_DYNAMIC_KEY
         select HAVE_REGS_AND_STACK_ACCESS_API
         select HAVE_RELIABLE_STACKTRACE
@@ -254,6 +255,7 @@ config S390
         select HOTPLUG_SMT
         select IOMMU_HELPER if PCI
         select IOMMU_SUPPORT if PCI
+        select IRQ_MSI_LIB if PCI
         select KASAN_VMALLOC if KASAN
         select LOCK_MM_AND_FIND_VMA
         select MMU_GATHER_MERGE_VMAS

View File

@@ -244,22 +244,10 @@ static void *boot_crst_alloc(unsigned long val)
 static pte_t *boot_pte_alloc(void)
 {
-        static void *pte_leftover;
         pte_t *pte;
-        /*
-         * handling pte_leftovers this way helps to avoid memory fragmentation
-         * during POPULATE_KASAN_MAP_SHADOW when EDAT is off
-         */
-        if (!pte_leftover) {
-                pte_leftover = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
-                pte = pte_leftover + _PAGE_TABLE_SIZE;
-                __arch_set_page_dat(pte, 1);
-        } else {
-                pte = pte_leftover;
-                pte_leftover = NULL;
-        }
+        pte = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
+        __arch_set_page_dat(pte, 1);
         memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
         return pte;
 }

View File

@@ -4,11 +4,14 @@
 #include <linux/stringify.h>
+#ifdef CONFIG_BUG
 #ifndef CONFIG_DEBUG_BUGVERBOSE
 #define _BUGVERBOSE_LOCATION(file, line)
 #else
 #define __BUGVERBOSE_LOCATION(file, line) \
         .pushsection .rodata.str, "aMS", @progbits, 1; \
+        .align 2; \
 10002: .ascii file "\0"; \
         .popsection; \
         \
@@ -52,6 +55,8 @@ do { \
 #define HAVE_ARCH_BUG
+#endif /* CONFIG_BUG */
 #include <asm-generic/bug.h>
 #endif /* _ASM_S390_BUG_H */

View File

@@ -166,6 +166,8 @@ static inline int page_reset_referenced(unsigned long addr)
         return CC_TRANSFORM(cc);
 }
+int split_pud_page(pud_t *pudp, unsigned long addr);
 /* Bits int the storage key */
 #define _PAGE_CHANGED 0x02 /* HW changed bit */
 #define _PAGE_REFERENCED 0x04 /* HW referenced bit */

View File

@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/iommu.h>
+#include <linux/irqdomain.h>
 #include <linux/pci_hotplug.h>
 #include <asm/pci_clp.h>
 #include <asm/pci_debug.h>
@@ -109,6 +110,7 @@ struct zpci_bus {
         struct list_head resources;
         struct list_head bus_next;
         struct resource bus_resource;
+        struct irq_domain *msi_parent_domain;
         int topo; /* TID if topo_is_tid, PCHID otherwise */
         int domain_nr;
         u8 multifunction : 1;
@@ -310,6 +312,9 @@ int zpci_dma_exit_device(struct zpci_dev *zdev);
 /* IRQ */
 int __init zpci_irq_init(void);
 void __init zpci_irq_exit(void);
+int zpci_set_irq(struct zpci_dev *zdev);
+int zpci_create_parent_msi_domain(struct zpci_bus *zbus);
+void zpci_remove_parent_msi_domain(struct zpci_bus *zbus);
 /* FMB */
 int zpci_fmb_enable_device(struct zpci_dev *);

View File

@@ -47,6 +47,7 @@ static void ptep_zap_softleaf_entry(struct mm_struct *mm, softleaf_t entry)
 void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
 {
         struct vm_area_struct *vma;
+        unsigned long pgstev;
         spinlock_t *ptl;
         pgste_t pgste;
         pte_t *ptep;
@@ -65,9 +66,13 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
         if (pte_swap(*ptep)) {
                 preempt_disable();
                 pgste = pgste_get_lock(ptep);
+                pgstev = pgste_val(pgste);
-                ptep_zap_softleaf_entry(mm, softleaf_from_pte(*ptep));
-                pte_clear(mm, vmaddr, ptep);
+                if ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
+                    (pgstev & _PGSTE_GPS_ZERO)) {
+                        ptep_zap_softleaf_entry(mm, softleaf_from_pte(*ptep));
+                        pte_clear(mm, vmaddr, ptep);
+                }
                 pgste_set_unlock(ptep, pgste);
                 preempt_enable();

View File

@@ -204,7 +204,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
         return rc;
 }
-static int split_pud_page(pud_t *pudp, unsigned long addr)
+int split_pud_page(pud_t *pudp, unsigned long addr)
 {
         unsigned long pmd_addr, prot;
         pmd_t *pm_dir, *pmdp;

View File

@@ -330,10 +330,14 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
                 if (pud_leaf(*pud)) {
                         if (IS_ALIGNED(addr, PUD_SIZE) &&
                             IS_ALIGNED(next, PUD_SIZE)) {
+                                if (!direct)
+                                        vmem_free_pages(pud_deref(*pud), get_order(PUD_SIZE), altmap);
                                 pud_clear(pud);
                                 pages++;
+                                continue;
+                        } else {
+                                split_pud_page(pud, addr & PUD_MASK);
                         }
-                        continue;
                 }
         } else if (pud_none(*pud)) {
                 if (IS_ALIGNED(addr, PUD_SIZE) &&
@@ -433,9 +437,15 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
         if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
                 return -EINVAL;
-        /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+        /* Don't mess with any tables not fully in 1:1 mapping, vmemmap & kasan area */
+#ifdef CONFIG_KASAN
+        if (WARN_ON_ONCE(!(start >= KASAN_SHADOW_START && end <= KASAN_SHADOW_END) &&
+                         end > __abs_lowcore))
+                return -EINVAL;
+#else
         if (WARN_ON_ONCE(end > __abs_lowcore))
                 return -EINVAL;
+#endif
         for (addr = start; addr < end; addr = next) {
                 next = pgd_addr_end(addr, end);
                 pgd = pgd_offset_k(addr);

View File

@@ -708,6 +708,12 @@ int zpci_reenable_device(struct zpci_dev *zdev)
         if (rc)
                 return rc;
+        if (zdev->msi_nr_irqs > 0) {
+                rc = zpci_set_irq(zdev);
+                if (rc)
+                        return rc;
+        }
         rc = zpci_iommu_register_ioat(zdev, &status);
         if (rc)
                 zpci_disable_device(zdev);

View File

@@ -14,6 +14,7 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
+#include <linux/irqdomain.h>
 #include <linux/jump_label.h>
 #include <linux/pci.h>
 #include <linux/printk.h>
@@ -198,19 +199,27 @@ static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s
         zbus->multifunction = zpci_bus_is_multifunction_root(fr);
         zbus->max_bus_speed = fr->max_bus_speed;
+        if (zpci_create_parent_msi_domain(zbus))
+                goto out_free_domain;
         /*
          * Note that the zbus->resources are taken over and zbus->resources
          * is empty after a successful call
          */
         bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources);
-        if (!bus) {
-                zpci_free_domain(zbus->domain_nr);
-                return -EFAULT;
-        }
+        if (!bus)
+                goto out_remove_msi_domain;
         zbus->bus = bus;
+        dev_set_msi_domain(&zbus->bus->dev, zbus->msi_parent_domain);
         return 0;
+out_remove_msi_domain:
+        zpci_remove_parent_msi_domain(zbus);
+out_free_domain:
+        zpci_free_domain(zbus->domain_nr);
+        return -ENOMEM;
 }
@@ -231,6 +240,7 @@ static void zpci_bus_release(struct kref *kref)
         mutex_lock(&zbus_list_lock);
         list_del(&zbus->bus_next);
         mutex_unlock(&zbus_list_lock);
+        zpci_remove_parent_msi_domain(zbus);
         kfree(zbus);
 }

View File

@ -6,6 +6,7 @@
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/smp.h>
#include <asm/isc.h>
@ -97,7 +98,7 @@ static int zpci_clear_directed_irq(struct zpci_dev *zdev)
}
/* Register adapter interruptions */
static int zpci_set_irq(struct zpci_dev *zdev)
int zpci_set_irq(struct zpci_dev *zdev)
{
int rc;
@ -125,27 +126,53 @@ static int zpci_clear_irq(struct zpci_dev *zdev)
static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
bool force)
{
struct msi_desc *entry = irq_data_get_msi_desc(data);
struct msi_msg msg = entry->msg;
int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
msg.address_lo &= 0xff0000ff;
msg.address_lo |= (cpu_addr << 8);
pci_write_msi_msg(data->irq, &msg);
irq_data_update_affinity(data, dest);
return IRQ_SET_MASK_OK;
}
/*
* Encode the hwirq number for the parent domain. The encoding must be unique
* for each IRQ of each device in the parent domain, so it uses the devfn to
* identify the device and the msi_index to identify the IRQ within that device.
*/
static inline u32 zpci_encode_hwirq(u8 devfn, u16 msi_index)
{
return (devfn << 16) | msi_index;
}
static inline u16 zpci_decode_hwirq_msi_index(irq_hw_number_t hwirq)
{
return hwirq & 0xffff;
}
static void zpci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
struct zpci_dev *zdev = to_zpci_dev(desc->dev);
if (irq_delivery == DIRECTED) {
int cpu = cpumask_first(irq_data_get_affinity_mask(data));
msg->address_lo = zdev->msi_addr & 0xff0000ff;
msg->address_lo |= (smp_cpu_get_cpu_address(cpu) << 8);
} else {
msg->address_lo = zdev->msi_addr & 0xffffffff;
}
msg->address_hi = zdev->msi_addr >> 32;
msg->data = zpci_decode_hwirq_msi_index(data->hwirq);
}
static struct irq_chip zpci_irq_chip = {
.name = "PCI-MSI",
.irq_unmask = pci_msi_unmask_irq,
.irq_mask = pci_msi_mask_irq,
.irq_compose_msi_msg = zpci_compose_msi_msg,
};
static void zpci_handle_cpu_local_irq(bool rescan)
{
struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
union zpci_sic_iib iib = {{0}};
struct irq_domain *msi_domain;
irq_hw_number_t hwirq;
unsigned long bit;
int irqs_on = 0;
@@ -163,7 +190,9 @@ static void zpci_handle_cpu_local_irq(bool rescan)
continue;
}
inc_irq_stat(IRQIO_MSI);
generic_handle_irq(airq_iv_get_data(dibv, bit));
hwirq = airq_iv_get_data(dibv, bit);
msi_domain = (struct irq_domain *)airq_iv_get_ptr(dibv, bit);
generic_handle_domain_irq(msi_domain, hwirq);
}
}
@@ -228,6 +257,8 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
struct tpi_info *tpi_info)
{
union zpci_sic_iib iib = {{0}};
struct irq_domain *msi_domain;
irq_hw_number_t hwirq;
unsigned long si, ai;
struct airq_iv *aibv;
int irqs_on = 0;
@@ -255,7 +286,9 @@ static void zpci_floating_irq_handler(struct airq_struct *airq,
break;
inc_irq_stat(IRQIO_MSI);
airq_iv_lock(aibv, ai);
generic_handle_irq(airq_iv_get_data(aibv, ai));
hwirq = airq_iv_get_data(aibv, ai);
msi_domain = (struct irq_domain *)airq_iv_get_ptr(aibv, ai);
generic_handle_domain_irq(msi_domain, hwirq);
airq_iv_unlock(aibv, ai);
}
}
@@ -277,7 +310,9 @@ static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs,
zdev->aisb = *bit;
/* Create adapter interrupt vector */
zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL);
zdev->aibv = airq_iv_create(msi_vecs,
AIRQ_IV_PTR | AIRQ_IV_DATA | AIRQ_IV_BITLOCK,
NULL);
if (!zdev->aibv)
return -ENOMEM;
@@ -289,133 +324,6 @@
return 0;
}
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu;
struct zpci_dev *zdev = to_zpci(pdev);
struct msi_desc *msi;
struct msi_msg msg;
unsigned long bit;
int cpu_addr;
int rc, irq;
zdev->aisb = -1UL;
zdev->msi_first_bit = -1U;
msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
if (msi_vecs < nvec) {
pr_info("%s requested %d irqs, allocate system limit of %d",
pci_name(pdev), nvec, zdev->max_msi);
}
rc = __alloc_airq(zdev, msi_vecs, &bit);
if (rc < 0)
return rc;
/*
* Request MSI interrupts:
* When using MSI, nvec_used interrupt sources and their irq
* descriptors are controlled through one msi descriptor.
* Thus the outer loop over msi descriptors shall run only once,
* while two inner loops iterate over the interrupt vectors.
* When using MSI-X, each interrupt vector/irq descriptor
* is bound to exactly one msi descriptor (nvec_used is one).
* So the inner loops are executed once, while the outer iterates
* over the MSI-X descriptors.
*/
hwirq = bit;
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
if (hwirq - bit >= msi_vecs)
break;
irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used);
irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE,
(irq_delivery == DIRECTED) ?
msi->affinity : NULL);
if (irq < 0)
return -ENOMEM;
for (i = 0; i < irqs_per_msi; i++) {
rc = irq_set_msi_desc_off(irq, i, msi);
if (rc)
return rc;
irq_set_chip_and_handler(irq + i, &zpci_irq_chip,
handle_percpu_irq);
}
msg.data = hwirq - bit;
if (irq_delivery == DIRECTED) {
if (msi->affinity)
cpu = cpumask_first(&msi->affinity->mask);
else
cpu = 0;
cpu_addr = smp_cpu_get_cpu_address(cpu);
msg.address_lo = zdev->msi_addr & 0xff0000ff;
msg.address_lo |= (cpu_addr << 8);
for_each_possible_cpu(cpu) {
for (i = 0; i < irqs_per_msi; i++)
airq_iv_set_data(zpci_ibv[cpu],
hwirq + i, irq + i);
}
} else {
msg.address_lo = zdev->msi_addr & 0xffffffff;
for (i = 0; i < irqs_per_msi; i++)
airq_iv_set_data(zdev->aibv, hwirq + i, irq + i);
}
msg.address_hi = zdev->msi_addr >> 32;
pci_write_msi_msg(irq, &msg);
hwirq += irqs_per_msi;
}
zdev->msi_first_bit = bit;
zdev->msi_nr_irqs = hwirq - bit;
rc = zpci_set_irq(zdev);
if (rc)
return rc;
return (zdev->msi_nr_irqs == nvec) ? 0 : zdev->msi_nr_irqs;
}
void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
struct msi_desc *msi;
unsigned int i;
int rc;
/* Disable interrupts */
rc = zpci_clear_irq(zdev);
if (rc)
return;
/* Release MSI interrupts */
msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
for (i = 0; i < msi->nvec_used; i++) {
irq_set_msi_desc(msi->irq + i, NULL);
irq_free_desc(msi->irq + i);
}
msi->msg.address_lo = 0;
msi->msg.address_hi = 0;
msi->msg.data = 0;
msi->irq = 0;
}
if (zdev->aisb != -1UL) {
zpci_ibv[zdev->aisb] = NULL;
airq_iv_free_bit(zpci_sbv, zdev->aisb);
zdev->aisb = -1UL;
}
if (zdev->aibv) {
airq_iv_release(zdev->aibv);
zdev->aibv = NULL;
}
if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U)
airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
}
bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
@@ -429,6 +337,207 @@ static struct airq_struct zpci_airq = {
.isc = PCI_ISC,
};
static void zpci_msi_teardown_directed(struct zpci_dev *zdev)
{
airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->max_msi);
zdev->msi_first_bit = -1U;
zdev->msi_nr_irqs = 0;
}
static void zpci_msi_teardown_floating(struct zpci_dev *zdev)
{
airq_iv_release(zdev->aibv);
zdev->aibv = NULL;
airq_iv_free_bit(zpci_sbv, zdev->aisb);
zdev->aisb = -1UL;
zdev->msi_first_bit = -1U;
zdev->msi_nr_irqs = 0;
}
static void zpci_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *arg)
{
struct zpci_dev *zdev = to_zpci_dev(domain->dev);
zpci_clear_irq(zdev);
if (irq_delivery == DIRECTED)
zpci_msi_teardown_directed(zdev);
else
zpci_msi_teardown_floating(zdev);
}
static int zpci_msi_prepare(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *info)
{
struct zpci_dev *zdev = to_zpci_dev(dev);
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long bit;
int msi_vecs, rc;
msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
if (msi_vecs < nvec) {
pr_info("%s requested %d IRQs, allocate system limit of %d\n",
pci_name(pdev), nvec, zdev->max_msi);
}
rc = __alloc_airq(zdev, msi_vecs, &bit);
if (rc) {
pr_err("Allocating adapter IRQs for %s failed\n", pci_name(pdev));
return rc;
}
zdev->msi_first_bit = bit;
zdev->msi_nr_irqs = msi_vecs;
rc = zpci_set_irq(zdev);
if (rc) {
pr_err("Registering adapter IRQs for %s failed\n",
pci_name(pdev));
if (irq_delivery == DIRECTED)
zpci_msi_teardown_directed(zdev);
else
zpci_msi_teardown_floating(zdev);
return rc;
}
return 0;
}
static int zpci_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct msi_desc *desc = ((msi_alloc_info_t *)args)->desc;
struct zpci_dev *zdev = to_zpci_dev(desc->dev);
struct zpci_bus *zbus = zdev->zbus;
unsigned int cpu, hwirq;
unsigned long bit;
int i;
bit = zdev->msi_first_bit + desc->msi_index;
hwirq = zpci_encode_hwirq(zdev->devfn, desc->msi_index);
if (desc->msi_index + nr_irqs > zdev->max_msi)
return -EINVAL;
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
&zpci_irq_chip, zdev,
handle_percpu_irq, NULL, NULL);
if (irq_delivery == DIRECTED) {
for_each_possible_cpu(cpu) {
airq_iv_set_ptr(zpci_ibv[cpu], bit + i,
(unsigned long)zbus->msi_parent_domain);
airq_iv_set_data(zpci_ibv[cpu], bit + i, hwirq + i);
}
} else {
airq_iv_set_ptr(zdev->aibv, bit + i,
(unsigned long)zbus->msi_parent_domain);
airq_iv_set_data(zdev->aibv, bit + i, hwirq + i);
}
}
return 0;
}
static void zpci_msi_clear_airq(struct irq_data *d, int i)
{
struct msi_desc *desc = irq_data_get_msi_desc(d);
struct zpci_dev *zdev = to_zpci_dev(desc->dev);
unsigned long bit;
unsigned int cpu;
u16 msi_index;
msi_index = zpci_decode_hwirq_msi_index(d->hwirq);
bit = zdev->msi_first_bit + msi_index;
if (irq_delivery == DIRECTED) {
for_each_possible_cpu(cpu) {
airq_iv_set_ptr(zpci_ibv[cpu], bit + i, 0);
airq_iv_set_data(zpci_ibv[cpu], bit + i, 0);
}
} else {
airq_iv_set_ptr(zdev->aibv, bit + i, 0);
airq_iv_set_data(zdev->aibv, bit + i, 0);
}
}
static void zpci_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d;
int i;
for (i = 0; i < nr_irqs; i++) {
d = irq_domain_get_irq_data(domain, virq + i);
zpci_msi_clear_airq(d, i);
irq_domain_reset_irq_data(d);
}
}
static const struct irq_domain_ops zpci_msi_domain_ops = {
.alloc = zpci_msi_domain_alloc,
.free = zpci_msi_domain_free,
};
static bool zpci_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent,
struct msi_domain_info *info)
{
if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
info->ops->msi_prepare = zpci_msi_prepare;
info->ops->msi_teardown = zpci_msi_teardown;
return true;
}
static struct msi_parent_ops zpci_msi_parent_ops = {
.supported_flags = MSI_GENERIC_FLAGS_MASK |
MSI_FLAG_PCI_MSIX |
MSI_FLAG_MULTI_PCI_MSI,
.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
MSI_FLAG_USE_DEF_CHIP_OPS,
.init_dev_msi_info = zpci_init_dev_msi_info,
};
int zpci_create_parent_msi_domain(struct zpci_bus *zbus)
{
char fwnode_name[18];
snprintf(fwnode_name, sizeof(fwnode_name), "ZPCI_MSI_DOM_%04x", zbus->domain_nr);
struct irq_domain_info info = {
.fwnode = irq_domain_alloc_named_fwnode(fwnode_name),
.ops = &zpci_msi_domain_ops,
};
if (!info.fwnode) {
pr_err("Failed to allocate fwnode for MSI IRQ domain\n");
return -ENOMEM;
}
if (irq_delivery == FLOATING)
zpci_msi_parent_ops.required_flags |= MSI_FLAG_NO_AFFINITY;
zbus->msi_parent_domain = msi_create_parent_irq_domain(&info, &zpci_msi_parent_ops);
if (!zbus->msi_parent_domain) {
irq_domain_free_fwnode(info.fwnode);
pr_err("Failed to create MSI IRQ domain\n");
return -ENOMEM;
}
return 0;
}
void zpci_remove_parent_msi_domain(struct zpci_bus *zbus)
{
struct fwnode_handle *fn;
fn = zbus->msi_parent_domain->fwnode;
irq_domain_remove(zbus->msi_parent_domain);
irq_domain_free_fwnode(fn);
}
static void __init cpu_enable_directed_irq(void *unused)
{
union zpci_sic_iib iib = {{0}};
@@ -465,6 +574,7 @@ static int __init zpci_directed_irq_init(void)
* is only done on the first vector.
*/
zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
AIRQ_IV_PTR |
AIRQ_IV_DATA |
AIRQ_IV_CACHELINE |
(!cpu ? AIRQ_IV_ALLOC : 0), NULL);
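Each per-CPU directed-IRQ vector is sized to one cache line of bits, with the allocation tracking (AIRQ_IV_ALLOC) kept on CPU 0's vector only. A back-of-envelope check, assuming the usual 256-byte s390 cache line:

	#include <stdio.h>

	#define BITS_PER_BYTE 8

	int main(void)
	{
		unsigned int cache_line_size = 256;	/* bytes; typical on s390 */

		/* prints "2048 directed-MSI bits per CPU" */
		printf("%u directed-MSI bits per CPU\n",
		       cache_line_size * BITS_PER_BYTE);
		return 0;
	}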

View File

@@ -2823,14 +2823,18 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
{
struct regulator_enable_gpio *pin = rdev->ena_pin;
int ret;
if (!pin)
return -EINVAL;
if (enable) {
/* Enable GPIO at initial use */
if (pin->enable_count == 0)
gpiod_set_value_cansleep(pin->gpiod, 1);
if (pin->enable_count == 0) {
ret = gpiod_set_value_cansleep(pin->gpiod, 1);
if (ret)
return ret;
}
pin->enable_count++;
} else {
@@ -2841,7 +2845,10 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
/* Disable GPIO if not used */
if (pin->enable_count <= 1) {
gpiod_set_value_cansleep(pin->gpiod, 0);
ret = gpiod_set_value_cansleep(pin->gpiod, 0);
if (ret)
return ret;
pin->enable_count = 0;
}
}
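The refcounted enable pin only drives the GPIO on the 0->1 and 1->0 transitions, so a failure there must not adjust the count. A minimal userspace model of that logic, with set_pin() as a stub standing in for gpiod_set_value_cansleep() (names here are illustrative, not kernel API):

	#include <stdio.h>

	static int set_pin_rc;	/* simulated result of driving the GPIO */

	static int set_pin(int value)
	{
		printf("drive pin to %d -> %d\n", value, set_pin_rc);
		return set_pin_rc;
	}

	static unsigned int enable_count;

	static int pin_ctrl(int enable)
	{
		int ret;

		if (enable) {
			if (enable_count == 0) {
				ret = set_pin(1);
				if (ret)
					return ret;	/* count stays 0 on failure */
			}
			enable_count++;
		} else {
			if (enable_count > 1) {
				enable_count--;
				return 0;	/* still shared, pin stays up */
			}
			if (enable_count == 1) {
				ret = set_pin(0);
				if (ret)
					return ret;	/* count untouched on failure */
				enable_count = 0;
			}
		}
		return 0;
	}

	int main(void)
	{
		set_pin_rc = -5;	/* -EIO: first consumer fails cleanly */
		printf("enable: %d count=%u\n", pin_ctrl(1), enable_count);
		set_pin_rc = 0;
		printf("enable: %d count=%u\n", pin_ctrl(1), enable_count);
		printf("disable: %d count=%u\n", pin_ctrl(0), enable_count);
		return 0;
	}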

View File

@@ -330,13 +330,10 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
&cfg);
if (IS_ERR(drvdata->dev)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
"Failed to register regulator: %ld\n",
PTR_ERR(drvdata->dev));
gpiod_put(cfg.ena_gpiod);
return ret;
}
if (IS_ERR(drvdata->dev))
return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
"Failed to register regulator: %ld\n",
PTR_ERR(drvdata->dev));
platform_set_drvdata(pdev, drvdata);

View File

@@ -87,10 +87,10 @@ static const struct linear_range p1_ldo_ranges[] = {
}
#define P1_BUCK_DESC(_n) \
P1_REG_DESC(BUCK, buck, _n, "vcc", 0x47, BUCK_MASK, 254, p1_buck_ranges)
P1_REG_DESC(BUCK, buck, _n, "vin", 0x47, BUCK_MASK, 254, p1_buck_ranges)
#define P1_ALDO_DESC(_n) \
P1_REG_DESC(ALDO, aldo, _n, "vcc", 0x5b, LDO_MASK, 117, p1_ldo_ranges)
P1_REG_DESC(ALDO, aldo, _n, "vin", 0x5b, LDO_MASK, 117, p1_ldo_ranges)
#define P1_DLDO_DESC(_n) \
P1_REG_DESC(DLDO, dldo, _n, "buck5", 0x67, LDO_MASK, 117, p1_ldo_ranges)

View File

@@ -44,6 +44,9 @@ struct sclp_mem {
unsigned int id;
unsigned int memmap_on_memory;
unsigned int config;
#ifdef CONFIG_KASAN
unsigned int early_shadow_mapped;
#endif
};
struct sclp_mem_arg {
@@ -244,6 +247,16 @@ static ssize_t sclp_config_mem_store(struct kobject *kobj, struct kobj_attribute
put_device(&mem->dev);
sclp_mem_change_state(addr, block_size, 0);
__remove_memory(addr, block_size);
#ifdef CONFIG_KASAN
if (sclp_mem->early_shadow_mapped) {
unsigned long start, end;
start = (unsigned long)kasan_mem_to_shadow(__va(addr));
end = start + (block_size >> KASAN_SHADOW_SCALE_SHIFT);
vmemmap_free(start, end, NULL);
sclp_mem->early_shadow_mapped = 0;
}
#endif
WRITE_ONCE(sclp_mem->config, 0);
}
out_unlock:
@@ -316,6 +329,9 @@ static int sclp_create_mem(struct sclp_mem *sclp_mem, struct kset *kset,
sclp_mem->memmap_on_memory = memmap_on_memory;
sclp_mem->config = config;
#ifdef CONFIG_KASAN
sclp_mem->early_shadow_mapped = config;
#endif
sclp_mem->id = id;
kobject_init(&sclp_mem->kobj, &ktype);
rc = kobject_add(&sclp_mem->kobj, &kset->kobj, "memory%d", id);
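When a standby memory block whose KASAN shadow was mapped early is offlined, the shadow range is torn down with it. The size math is simple: with the generic KASAN scale of one shadow byte per 8 bytes of memory (KASAN_SHADOW_SCALE_SHIFT == 3, assumed here), a block's shadow is one eighth of its size:

	#include <stdio.h>

	#define KASAN_SHADOW_SCALE_SHIFT 3

	int main(void)
	{
		unsigned long block_size = 1UL << 30;	/* 1 GiB standby block */
		unsigned long shadow = block_size >> KASAN_SHADOW_SCALE_SHIFT;

		/* prints "block 1024 MiB -> shadow 128 MiB" */
		printf("block %lu MiB -> shadow %lu MiB\n",
		       block_size >> 20, shadow >> 20);
		return 0;
	}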

View File

@@ -154,7 +154,7 @@ static struct urdev *urdev_get_from_devno(u16 devno)
struct ccw_device *cdev;
struct urdev *urd;
sprintf(bus_id, "0.0.%04x", devno);
scnprintf(bus_id, sizeof(bus_id), "0.0.%04x", devno);
cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
if (!cdev)
return NULL;
@@ -904,11 +904,11 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
if (urd->class == DEV_CLASS_UR_I)
sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
scnprintf(node_id, sizeof(node_id), "vmrdr-%s", dev_name(&cdev->dev));
if (urd->class == DEV_CLASS_UR_O)
sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
scnprintf(node_id, sizeof(node_id), "vmpun-%s", dev_name(&cdev->dev));
} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
scnprintf(node_id, sizeof(node_id), "vmprt-%s", dev_name(&cdev->dev));
} else {
rc = -EOPNOTSUPP;
goto fail_free_cdev;
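Unlike sprintf(), scnprintf() never writes past the buffer and returns the number of characters actually stored, not the would-have-been length that snprintf() reports. A userspace stand-in (vsnprintf()-based, assumes size > 0; illustrative, not the kernel implementation) shows the truncation behaviour:

	#include <stdarg.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's scnprintf(). */
	static int scnprintf(char *buf, size_t size, const char *fmt, ...)
	{
		va_list args;
		int n;

		va_start(args, fmt);
		n = vsnprintf(buf, size, fmt, args);
		va_end(args);
		if (n < 0)
			return 0;
		return (size_t)n < size ? n : (int)(size - 1);
	}

	int main(void)
	{
		char node_id[16];

		/* sprintf() would overflow node_id for an oversized device
		 * name; scnprintf() stores at most 15 chars plus the NUL. */
		int n = scnprintf(node_id, sizeof(node_id),
				  "vmrdr-%s", "0.0.deadbeef01");
		printf("%d \"%s\"\n", n, node_id);	/* 15 "vmrdr-0.0.deadb" */
		return 0;
	}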

View File

@@ -387,6 +387,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(dev, host);
if (ret) {
mchp_corespi_disable_ints(spi);
mchp_corespi_disable(spi);
return dev_err_probe(dev, ret, "unable to register host for CoreSPI controller\n");
}

View File

@@ -90,7 +90,7 @@
*/
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
#define DMA_BIT_MASK(n) GENMASK_ULL(n - 1, 0)
#define DMA_BIT_MASK(n) GENMASK_ULL((n) - 1, 0)
struct dma_iova_state {
dma_addr_t addr;
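The added parentheses matter whenever the macro argument is an expression. A minimal sketch, with a local genmask_ull() standing in for the kernel's GENMASK_ULL():

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t genmask_ull(unsigned int h, unsigned int l)
	{
		return (~0ULL >> (63 - h)) & (~0ULL << l);
	}

	#define DMA_BIT_MASK_OLD(n) genmask_ull(n - 1, 0)	/* unparenthesized */
	#define DMA_BIT_MASK_NEW(n) genmask_ull((n) - 1, 0)	/* as fixed above */

	int main(void)
	{
		/* With the expression 8 << 2 (i.e. 32), the old macro expands
		 * to genmask_ull(8 << 2 - 1, 0) == genmask_ull(8 << 1, 0),
		 * a 17-bit mask instead of the intended 32-bit one. */
		printf("old: %#llx\n", (unsigned long long)DMA_BIT_MASK_OLD(8 << 2));
		printf("new: %#llx\n", (unsigned long long)DMA_BIT_MASK_NEW(8 << 2));
		return 0;
	}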

View File

@@ -182,9 +182,9 @@ int generic_handle_irq_safe(unsigned int irq);
* and handle the result interrupt number. Return -EINVAL if
* conversion failed.
*/
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq);
int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
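irq_hw_number_t is unsigned long, so the old unsigned int parameter silently truncated any hwirq encoding that uses the upper bits of the type. A small demonstration (the 64-bit encoding here is hypothetical):

	#include <stdio.h>

	typedef unsigned long irq_hw_number_t;

	static void take_uint(unsigned int hwirq)
	{
		printf("as uint:  %#x\n", hwirq);
	}

	static void take_hwirq(irq_hw_number_t hwirq)
	{
		printf("as ulong: %#lx\n", hwirq);
	}

	int main(void)
	{
		irq_hw_number_t hwirq = (1UL << 40) | 0x42; /* upper bits in use */

		take_uint(hwirq);	/* silently drops bit 40 */
		take_hwirq(hwirq);	/* preserved */
		return 0;
	}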

View File

@@ -1150,10 +1150,17 @@ static inline void kvfree_rcu_barrier(void)
rcu_barrier();
}
static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
{
rcu_barrier();
}
static inline void kfree_rcu_scheduler_running(void) { }
#else
void kvfree_rcu_barrier(void);
void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
void kfree_rcu_scheduler_running(void);
#endif

View File

@@ -93,7 +93,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
page = dma_alloc_from_contiguous(NULL, 1 << order,
order, false);
if (!page)
page = alloc_pages(gfp, order);
page = alloc_pages(gfp | __GFP_NOWARN, order);
} while (!page && order-- > 0);
if (!page)
goto out;
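The surrounding loop retries at ever smaller orders, so a failed high-order attempt is expected and should stay quiet; only running out of memory at order 0 is a real error. The shape of the loop, with a stubbed allocator for illustration:

	#include <stdio.h>
	#include <stdlib.h>

	static void *try_alloc(int order, int quiet)
	{
		/* pretend fragmentation lets only order <= 1 succeed */
		if (order > 1) {
			if (!quiet)
				fprintf(stderr, "alloc failure at order %d\n", order);
			return NULL;
		}
		return malloc(4096UL << order);
	}

	int main(void)
	{
		void *page = NULL;
		int order = 3;

		do {	/* mirrors the kernel's order-lowering retry loop */
			page = try_alloc(order, /* quiet, like __GFP_NOWARN */ 1);
		} while (!page && order-- > 0);

		if (page)
			printf("got an order-%d block\n", order);
		else
			printf("out of memory\n");
		free(page);
		return 0;
	}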

View File

@@ -720,7 +720,7 @@ EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
* This function must be called from an IRQ context with irq regs
* initialized.
*/
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq)
{
return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
@@ -738,7 +738,7 @@ EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
* context). If the interrupt is marked as 'enforce IRQ-context only' then
* the function must be invoked from hard interrupt context.
*/
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq)
{
unsigned long flags;
int ret;
@@ -761,7 +761,7 @@ EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);
* This function must be called from an NMI context with irq regs
* initialized.
**/
int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq)
int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq)
{
WARN_ON_ONCE(!in_nmi());
return handle_irq_desc(irq_resolve_mapping(domain, hwirq));

View File

@@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s)
bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj);
void flush_all_rcu_sheaves(void);
void flush_rcu_sheaves_on_cache(struct kmem_cache *s);
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_PANIC | \

View File

@@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
return;
/* in-flight kfree_rcu()'s may include objects from our cache */
kvfree_rcu_barrier();
kvfree_rcu_barrier_on_cache(s);
if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
(s->flags & SLAB_TYPESAFE_BY_RCU)) {
@@ -2038,25 +2038,13 @@ unlock_return:
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
/**
* kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
*
* Note that a single-argument kvfree_rcu() call has a slow path that
* triggers synchronize_rcu() followed by freeing the pointer. It is done
* before the return from the function. Therefore, for any single-argument
* call that will result in a kfree() to a cache that is to be destroyed
* during module exit, it is the developer's responsibility to ensure that
* all such calls have returned before the call to kmem_cache_destroy().
*/
void kvfree_rcu_barrier(void)
static inline void __kvfree_rcu_barrier(void)
{
struct kfree_rcu_cpu_work *krwp;
struct kfree_rcu_cpu *krcp;
bool queued;
int i, cpu;
flush_all_rcu_sheaves();
/*
* Firstly we detach objects and queue them over an RCU-batch
* for all CPUs. Finally queued works are flushed for each CPU.
@@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void)
}
}
}
/**
* kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete.
*
* Note that a single-argument kvfree_rcu() call has a slow path that
* triggers synchronize_rcu() followed by freeing the pointer. It is done
* before the return from the function. Therefore, for any single-argument
* call that will result in a kfree() to a cache that is to be destroyed
* during module exit, it is the developer's responsibility to ensure that
* all such calls have returned before the call to kmem_cache_destroy().
*/
void kvfree_rcu_barrier(void)
{
flush_all_rcu_sheaves();
__kvfree_rcu_barrier();
}
EXPORT_SYMBOL_GPL(kvfree_rcu_barrier);
/**
* kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a
* specific slab cache.
* @s: slab cache to wait for
*
* See the description of kvfree_rcu_barrier() for details.
*/
void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
{
if (s->cpu_sheaves)
flush_rcu_sheaves_on_cache(s);
/*
* TODO: Introduce a version of __kvfree_rcu_barrier() that works
* on a specific slab cache.
*/
__kvfree_rcu_barrier();
}
EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache);
static unsigned long
kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void)
}
#endif /* CONFIG_KVFREE_RCU_BATCHED */
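The per-cache barrier lets kmem_cache_destroy() wait only for in-flight kfree_rcu() objects belonging to the cache being destroyed, instead of flushing every cache in the system. A sketch of the module pattern this serves (hypothetical module; with this change, the barrier taken inside kmem_cache_destroy() is scoped to demo_cache):

	#include <linux/module.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct rcu_head rcu;
		int payload;
	};

	static struct kmem_cache *demo_cache;

	static int __init demo_init(void)
	{
		struct demo_obj *obj;

		demo_cache = kmem_cache_create("demo_cache",
					       sizeof(struct demo_obj),
					       0, 0, NULL);
		if (!demo_cache)
			return -ENOMEM;
		obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);
		if (obj)
			kfree_rcu(obj, rcu);	/* may still be in flight at exit */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		/* Waits for this cache's in-flight kfree_rcu() objects only. */
		kmem_cache_destroy(demo_cache);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");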

View File

@@ -4122,42 +4122,47 @@ static void flush_rcu_sheaf(struct work_struct *w)
/* needed for kvfree_rcu_barrier() */
void flush_all_rcu_sheaves(void)
void flush_rcu_sheaves_on_cache(struct kmem_cache *s)
{
struct slub_flush_work *sfw;
struct kmem_cache *s;
unsigned int cpu;
mutex_lock(&flush_lock);
for_each_online_cpu(cpu) {
sfw = &per_cpu(slub_flush, cpu);
/*
* we don't check if rcu_free sheaf exists - racing
* __kfree_rcu_sheaf() might have just removed it.
* by executing flush_rcu_sheaf() on the cpu we make
* sure the __kfree_rcu_sheaf() finished its call_rcu()
*/
INIT_WORK(&sfw->work, flush_rcu_sheaf);
sfw->s = s;
queue_work_on(cpu, flushwq, &sfw->work);
}
for_each_online_cpu(cpu) {
sfw = &per_cpu(slub_flush, cpu);
flush_work(&sfw->work);
}
mutex_unlock(&flush_lock);
}
void flush_all_rcu_sheaves(void)
{
struct kmem_cache *s;
cpus_read_lock();
mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
if (!s->cpu_sheaves)
continue;
mutex_lock(&flush_lock);
for_each_online_cpu(cpu) {
sfw = &per_cpu(slub_flush, cpu);
/*
* we don't check if rcu_free sheaf exists - racing
* __kfree_rcu_sheaf() might have just removed it.
* by executing flush_rcu_sheaf() on the cpu we make
* sure the __kfree_rcu_sheaf() finished its call_rcu()
*/
INIT_WORK(&sfw->work, flush_rcu_sheaf);
sfw->s = s;
queue_work_on(cpu, flushwq, &sfw->work);
}
for_each_online_cpu(cpu) {
sfw = &per_cpu(slub_flush, cpu);
flush_work(&sfw->work);
}
mutex_unlock(&flush_lock);
flush_rcu_sheaves_on_cache(s);
}
mutex_unlock(&slab_mutex);