Linus Torvalds
372800cb95 for-6.19-rc4-tag

Merge tag 'for-6.19-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - fix potential NULL pointer dereference when replaying tree log after
   an error

 - release path before initializing extent tree to avoid potential
   deadlock when allocating new inode

 - on filesystems with block size > page size
    - fix potential read out of bounds during encoded read of an inline
      extent
    - only enforce free space tree if v1 cache is required

 - print correct tree id in error message

* tag 'for-6.19-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: show correct warning if can't read data reloc tree
  btrfs: fix NULL pointer dereference in do_abort_log_replay()
  btrfs: force free space tree for bs > ps cases
  btrfs: only enforce free space tree if v1 cache is required for bs < ps cases
  btrfs: release path before initializing extent tree in btrfs_read_locked_inode()
  btrfs: avoid access-beyond-folio for bs > ps encoded writes
2026-01-09 07:02:38 -10:00
Linus Torvalds
4d6fe1dd12 pci-v6.19-fixes-2
-----BEGIN PGP SIGNATURE-----
 
 iQJIBAABCgAyFiEEgMe7l+5h9hnxdsnuWYigwDrT+vwFAmlhJ10UHGJoZWxnYWFz
 QGdvb2dsZS5jb20ACgkQWYigwDrT+vzUhg//aXpxdbp4Y7UABwnTVEJTDMnSu7v0
 wd8f+zfhaORHv+rIRabrvy4qfCra/ZRRo6VBivB7lOJsl3QREYdnXGz/OZKidAq3
 TrzAV1EOwUg7lskhcVwbC6SEExWEtRx58WiRtGJxrMU6BnmulG3wBMpS6/cKrsn9
 VnXnZt6RwG5Ltioh9k6GW4z9uNTot/+edzN0GfQkVsuAS9O6jgqathhz1kh33VVH
 xZlopVwQC9YAQJXbLoAEHc5KkiE5KDQbFBBsPZBUxUIk1BUyyXCuf790rKUtwfR1
 vymi6TcdLsGuETy5UpxtZPNGp8MlnRYCj6NIIW2FgaPijzf8XINxefPrnmN9cMx+
 MFp0JtTJqqs/lf12pvfAcG102E2kvzl+Cv97ru+zsJviUeVlmtqCnku6DTMTeE9y
 acL0VLhZaZqrld2klYucZ1aYbANxnpGtRFzQ/ToUuJxeKlyvDBswF0+Ph8vzqFx4
 UwM6jLtLGrGeqEAXYSCQp3vLDI/ESeHXVLQOqwY3KrtySgCZO7IWX1eWiGJCKq92
 GRwZdRyIhfoM94P2dNPtdfPzG6pefOjxlR5f18gVRsWgqKE2FZgoVNcNJaIN7aGR
 OrmZ9DtyYgJkuDyKffbwDc1taGpEwjWwvvaN4zwx8DrachDPaSfGo91ui7Zpyvlq
 dKAc3JzSjCjVQK8=
 =pTRZ
 -----END PGP SIGNATURE-----

Merge tag 'pci-v6.19-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci

Pull PCI fixes from Bjorn Helgaas:

 - Remove ASPM L0s support for MSM8996 SoC since we now enable L0s when
   advertised, and it caused random hangs on this device (Manivannan
   Sadhasivam)

 - Fix meson-pcie to report that the link is up while in ASPM L0s or L1,
   since those are active states from the software point of view, and
   treating the link as down caused config access failures (Bjorn
   Helgaas)

 - Fix up sparc DTS BAR descriptions that are above 4GB but not marked
   as prefetchable, which caused resource assignment and driver probe
   failures after we converted from the SPARC pcibios_enable_device() to
   the generic version (Ilpo Järvinen)

* tag 'pci-v6.19-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci:
  sparc/PCI: Correct 64-bit non-pref -> pref BAR resources
  PCI: meson: Report that link is up while in ASPM L0s and L1 states
  PCI: qcom: Remove ASPM L0s support for MSM8996 SoC
2026-01-09 06:41:10 -10:00
Linus Torvalds
553410fcb9 ACPI support fix for 6.19-rc5

Merge tag 'acpi-6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI support fix from Rafael Wysocki:
 "This fixes the ACPI/PCI legacy interrupts (INTx) parsing in the case
  when the ACPI Global System Interrupt (GSI) value is a 32-bit one with
  the MSB set.

  That was interpreted as a negative integer and caused
  acpi_pci_link_allocate_irq() to fail and acpi_irq_get_penalty() to
  trigger an out-of-bounds array dereference (Lorenzo Pieralisi)"
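
As a standalone illustration of the pitfall (not the kernel patch itself), here is what happens when such a GSI value lands in a signed int:

```
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gsi = 0x80000010u; /* a GSI with the MSB set */
	int signed_gsi = (int)gsi;  /* wraps to a large negative value */

	/* "gsi < 0" failure checks and array indexing now misbehave */
	printf("unsigned: %u, signed: %d\n", gsi, signed_gsi);
	return 0;
}
```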

* tag 'acpi-6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI: PCI: IRQ: Fix INTx GSIs signedness
2026-01-09 06:20:15 -10:00
Linus Torvalds
81c5ffec9e Power management fix for 6.19-rc5

Merge tag 'pm-6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fix from Rafael Wysocki:
 "This fixes a crash in the hibernation image saving code that can be
  triggered when the given compression algorithm is unavailable (Malaya
  Kumar Rout)"

* tag 'pm-6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM: hibernate: Fix crash when freeing invalid crypto compressor
2026-01-09 06:18:05 -10:00
Linus Torvalds
a81668db9e gpio fixes for v6.19-rc5

Merge tag 'gpio-fixes-for-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux

Pull gpio fixes from Bartosz Golaszewski:
 "There are several ordinary driver fixes and a fix to a race between
  the registration of two chips that causes a crash in GPIO core.

  The bulk of the changed lines however, concerns the management of
  shared GPIOs that landed in v6.19-rc1. Enabling it for ARCH_QCOM
  enabled it in defconfig which effectively enabled it for all arm64
  platforms and exposed the code to quite a lot of testing (which is
  good, right? :)).

  As a result, I received a number of bug reports, which I progressively
  fixed over the course of the last few weeks. This explains the number
  of lines being higher than what I normally aim for at this stage.

   - balance superio enter/exit calls in error path in gpio-it87

   - fix a race where we try to take the SRCU read lock of the GPIO
     device before it's been initialized causing a NULL-pointer
     dereference

   - fix handling of short-pulse interrupts in gpio-pca953x

   - fix a reference leak in error path in gpio-mpsse

   - mark the GPIO controller as sleeping (it calls sleeping functions)
     in gpio-rockchip

   - fix several issues in management of shared GPIOs"

* tag 'gpio-fixes-for-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux:
  gpio: shared: fix a false-positive sharing detection with reset-gpios
  gpiolib: fix lookup table matching
  gpio: shared: don't allocate the lookup table until we really need it
  gpio: shared: fix a race condition
  gpio: shared: assign the correct firmware node for reset-gpio use-case
  gpio: rockchip: mark the GPIO controller as sleeping
  gpio: mpsse: fix reference leak in gpio_mpsse_probe() error paths
  gpio: pca953x: handle short interrupt pulses on PCAL devices
  gpiolib: fix race condition for gdev->srcu
  gpio: shared: allow sharing a reset-gpios pin between reset-gpio and gpiolib
  gpio: shared: verify con_id when adding proxy lookup
  gpiolib: allow multiple lookup tables per consumer
  gpio: it87: balance superio enter/exit calls in error path
2026-01-09 06:10:22 -10:00
Linus Torvalds
cbd4480cfa drm fixes for 6.19-rc5

Merge tag 'drm-fixes-2026-01-09' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "I missed the drm-rust fixes tree for last week, so this catches up on
  that, along with amdgpu, and then some misc fixes across a few
  drivers. I hadn't got an xe pull by the time I sent this; I suspect
  one will arrive 10 minutes after, but I don't think there is anything
  that can't wait for next week.

  Things seem to have picked up a little with people coming back from
  holidays.

  MAINTAINERS:
   - Fix Nova GPU driver git links
   - Fix typo in TYR driver entry preventing correct behavior of
     scripts/get_maintainer.pl
   - Exclude TYR driver from DRM MISC

  nova-core:
   - Correctly select RUST_FW_LOADER_ABSTRACTIONS to prevent build
     errors
   - Regenerate nova-core bindgen bindings with '--explicit-padding' to
     avoid uninitialized bytes
   - Fix length of received GSP messages, due to miscalculated message
     payload size
   - Regenerate bindings to derive MaybeZeroable
   - Use a bindings alias to derive the firmware version

  exynos:
   - hdmi: replace system_wq with system_percpu_wq

  pl111:
   - Fix error handling in probe

  mediatek/atomic/tidss:
   - Fix tidss in another way and revert reordering of pre-enable and
     post-disable operations, as it breaks other bridge drivers

  nouveau:
   - Fix regression from fwsec s/r fix

  pci/vga:
   - Fix multiple GPUs being reported as 'boot_display'

  fb-helper:
   - Fix vblank timeout during suspend/reset

  amdgpu:
   - Clang fixes
   - Navi1x PCIe DPM fixes
   - Ring reset fixes
   - ISP suspend fix
   - Analog DC fixes
   - VPE fixes
   - Mode1 reset fix

  radeon:
   - Variable sized array fix"

* tag 'drm-fixes-2026-01-09' of https://gitlab.freedesktop.org/drm/kernel: (32 commits)
  Reapply "Revert "drm/amd: Skip power ungate during suspend for VPE""
  drm/amd/display: Check NULL before calling dac_load_detection
  drm/amd/pm: Disable MMIO access during SMU Mode 1 reset
  drm/exynos: hdmi: replace use of system_wq with system_percpu_wq
  drm/fb-helper: Fix vblank timeout during suspend/reset
  PCI/VGA: Don't assume the only VGA device on a system is `boot_vga`
  drm/amdgpu: Fix query for VPE block_type and ip_count
  drm/amd/display: Add missing encoder setup to DACnEncoderControl
  drm/amd/display: Correct color depth for SelectCRTC_Source
  drm/amd/amdgpu: Fix SMU warning during isp suspend-resume
  drm/amdgpu: always backup and reemit fences
  drm/amdgpu: don't reemit ring contents more than once
  drm/amd/pm: force send pcie parmater on navi1x
  drm/amd/pm: fix wrong pcie parameter on navi1x
  drm/radeon: Remove __counted_by from ClockInfoArray.clockInfo[]
  drm/amd/display: Reduce number of arguments of dcn30's CalculateWatermarksAndDRAMSpeedChangeSupport()
  drm/amd/display: Reduce number of arguments of dcn30's CalculatePrefetchSchedule()
  drm/amd/display: Apply e4479aecf658 to dml
  nouveau: don't attempt fwsec on sb on newer platforms
  drm/tidss: Fix enable/disable order
  ...
2026-01-09 06:04:05 -10:00
Linus Torvalds
2bfe3e0da6 vfs-6.19-rc5.fixes

Merge tag 'vfs-6.19-rc5.fixes' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:

 - Remove incorrect __user annotation from struct xattr_args::value

 - Documentation fix: Add missing kernel-doc description for the @isnew
   parameter in ilookup5_nowait() to silence Sphinx warnings

 - Documentation fix: Fix kernel-doc comment for __start_dirop() - the
   function name in the comment was wrong and the @state parameter was
   undocumented

 - Replace dynamic folio_batch allocation with stack allocation in
   iomap_zero_range(). The dynamic allocation was problematic for
   ext4-on-iomap work (didn't handle allocation failure properly) and
   triggered lockdep complaints. Uses a flag instead to control batch
   usage

 - Re-add #ifdef guards around PIDFD_GET_<ns-type>_NAMESPACE ioctls.
   When a namespace type is disabled, ns->ops is NULL, which causes crashes
   during inode eviction when closing the fd. The ifdefs were removed in
   a recent simplification but are still needed

 - Fix a race where a folio could be unlocked before the trailing zeros
   (for EOF within the page) were written

 - Split out a dedicated lease_dispose_list() helper since lease code
   paths always know they're disposing of leases. Removes unnecessary
   runtime flag checks and prepares for upcoming lease_manager
   enhancements

 - Fix userland delegation requests succeeding despite conflicting
   opens. Previously, FL_LAYOUT and FL_DELEG leases bypassed conflict
   checks (a hack for nfsd). Adds new ->lm_open_conflict() lease_manager
   operation so userland delegations get proper conflict checking while
   nfsd can continue its own conflict handling

 - Fix LOOKUP_CACHED path lookups incorrectly falling through to the
   slow path. After legitimize_links() calls were conditionally elided,
   the routine would always fail with LOOKUP_CACHED regardless of
   whether there were any links. Now the flag is checked at the two
   callsites before calling legitimize_links()

 - Fix bug in media fd allocation in media_request_alloc()

 - Fix mismatched API calls in ecryptfs_mknod(): was calling
   end_removing() instead of end_creating() after
   ecryptfs_start_creating_dentry()

 - Fix dentry reference count leak in ecryptfs_mkdir(): a dget() of the
   lower parent dir was added but never dput()'d, causing BUG during
   lower filesystem unmount due to the still-in-use dentry

* tag 'vfs-6.19-rc5.fixes' of gitolite.kernel.org:pub/scm/linux/kernel/git/vfs/vfs:
  pidfs: protect PIDFD_GET_* ioctls() via ifdef
  ecryptfs: Release lower parent dentry after creating dir
  ecryptfs: Fix improper mknod pairing of start_creating()/end_removing()
  get rid of bogus __user in struct xattr_args::value
  VFS: fix __start_dirop() kernel-doc warnings
  fs: Describe @isnew parameter in ilookup5_nowait()
  fs: make sure to fail try_to_unlazy() and try_to_unlazy_next() for LOOKUP_CACHED
  netfs: Fix early read unlock of page with EOF in middle
  filelock: allow lease_managers to dictate what qualifies as a conflict
  filelock: add lease_dispose_list() helper
  iomap: replace folio_batch allocation with stack allocation
  media: mc: fix potential use-after-free in media_request_alloc()
2026-01-09 05:57:57 -10:00
Linus Torvalds
77d4c5da97 This push contains the following changes:

Merge tag 'v6.19-p3' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fix from Herbert Xu:

 - Fix duplicate restart messages in qat

* tag 'v6.19-p3' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: qat - fix duplicate restarting msg during AER error
2026-01-09 05:55:34 -10:00
Bartosz Golaszewski
d578b31856 gpio: shared: fix a false-positive sharing detection with reset-gpios
After scanning the devicetree, we remove all entries that have only one
reference, while creating GPIO shared proxies for the remaining, shared
entries. However, for the reset-gpio corner case, we will have two
references for a "reset-gpios" pin that's not really shared. In this
case one will come from the actual consumer fwnode and the other from
the potential auxiliary reset-gpio device. This causes the GPIO core to
create unnecessary GPIO shared proxy devices for pins that are not
really shared.

Add a function that can detect this situation and remove entries that
have exactly two references, one of which is a reset-gpio.
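
A hedged sketch of the detection (structure and field names are hypothetical, for illustration only):

```
#include <linux/types.h>

struct shared_entry {            /* hypothetical bookkeeping structure */
	unsigned int num_refs;   /* references found while scanning the DT */
	bool has_reset_gpio_ref; /* one reference is the reset-gpio device */
};

/*
 * Exactly two references where one comes from the reset-gpio auxiliary
 * device means the pin is not truly shared: drop the entry instead of
 * creating a proxy for it.
 */
static bool gpio_shared_is_false_positive(const struct shared_entry *entry)
{
	return entry->num_refs == 2 && entry->has_reset_gpio_ref;
}
```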

Fixes: 7b78b26757e0 ("gpio: shared: handle the reset-gpios corner case")
Link: https://lore.kernel.org/r/20260108-gpio-shared-false-positive-v1-1-5dbf8d1b2f7d@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-09 09:56:46 +01:00
Bartosz Golaszewski
36f597bba0 gpiolib: fix lookup table matching
If, on any iteration in gpiod_find(), gpio_desc_table_match() returns
NULL (which is normal and expected), we never reinitialize desc back to
ERR_PTR(-ENOENT), and if we don't find a match later on, we return NULL,
causing a NULL-pointer dereference in users not expecting it. Don't
pre-initialize desc; instead, return ERR_PTR(-ENOENT) explicitly at the
end of the function.
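
The fixed control flow, as a hedged sketch (the loop is simplified from gpiod_find(); the match helper's signature is assumed):

```
struct gpiod_lookup_table *table;
struct gpio_desc *desc;

list_for_each_entry(table, &gpio_lookup_list, list) {
	desc = gpio_desc_table_match(table, con_id, idx, &flags);
	if (desc)            /* NULL only means "no match in this table" */
		return desc; /* a real match, or an ERR_PTR on hard errors */
}

/* never carry a stale NULL out of the loop: say "not found" explicitly */
return ERR_PTR(-ENOENT);
```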

Fixes: 9700b0fccf38 ("gpiolib: allow multiple lookup tables per consumer")
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Closes: https://lore.kernel.org/all/00107523-7737-4b92-a785-14ce4e93b8cb@samsung.com/
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20260108102314.18816-1-bartosz.golaszewski@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-08 15:23:15 +01:00
Dave Airlie
f6eac56d6b amd-drm-fixes-6.19-2026-01-06:

Merge tag 'amd-drm-fixes-6.19-2026-01-06' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.19-2026-01-06:

amdgpu:
- Clang fixes
- Navi1x PCIe DPM fixes
- Ring reset fixes
- ISP suspend fix
- Analog DC fixes
- VPE fixes
- Mode1 reset fix

radeon:
- Variable sized array fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260107223315.16095-1-alexander.deucher@amd.com
2026-01-08 10:34:27 +10:00
Mario Limonciello (AMD)
6b2989ac5e Reapply "Revert "drm/amd: Skip power ungate during suspend for VPE""
Skipping the power ungate exposed some scenarios that fail like the one
below:

```
amdgpu: Register(0) [regVPEC_QUEUE_RESET_REQ] failed to reach value 0x00000000 != 0x00000001n
amdgpu 0000:c1:00.0: amdgpu: VPE queue reset failed
...
amdgpu: [drm] *ERROR* wait_for_completion_timeout timeout!
```

The underlying s2idle issue that prompted this commit is going to
be fixed in BIOS.
This reverts commit 2a6c826cfeedd7714611ac115371a959ead55bda.

This was lost in the 6.19 merge, so reapply it.

Fixes: 2a6c826cfeed ("drm/amd: Skip power ungate during suspend for VPE")
Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reported-by: Konstantin <answer2019@yandex.ru>
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220812
Reported-by: Matthew Schwartz <matthew.schwartz@linux.dev>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 3925683515e93844be204381d2d5a1df5de34f31)
2026-01-07 17:24:16 -05:00
Alex Hung
eb236fb911 drm/amd/display: Check NULL before calling dac_load_detection
dac_load_detection can be NULL in some scenarios, so check it before
calling.
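
A sketch of the guard (surrounding names are illustrative):

```
/* the hook is optional on some hardware, so check before calling */
bool detected = false;

if (funcs->dac_load_detection)
	detected = funcs->dac_load_detection(encoder);
```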

Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Hung <alex.hung@amd.com>
Signed-off-by: Chenyu Chen <chen-yu.chen@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 179176134b535246f0b368b30e8ecad50066f896)
2026-01-07 17:24:16 -05:00
Perry Yuan
0de604d035 drm/amd/pm: Disable MMIO access during SMU Mode 1 reset
During Mode 1 reset, the ASIC undergoes a reset cycle and becomes
temporarily inaccessible via PCIe. Any attempt to access MMIO registers
during this window (e.g., from interrupt handlers or other driver threads)
can result in uncompleted PCIe transactions, leading to NMI panics or
system hangs.

To prevent this, set the `no_hw_access` flag to true immediately after
triggering the reset. This signals other driver components to skip
register accesses while the device is offline.

A memory barrier `smp_mb()` is added to ensure the flag update is
globally visible to all cores before the driver enters the sleep/wait
state.
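
A sketch of the ordering described above (surrounding reset code elided):

```
/* flag first, then fence, so no core issues MMIO once the ASIC is offline */
adev->no_hw_access = true;
smp_mb(); /* make the flag globally visible before we sleep/wait */

/* ... trigger the SMU Mode 1 reset and wait for the ASIC to return ... */
```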

Signed-off-by: Perry Yuan <perry.yuan@amd.com>
Reviewed-by: Yifan Zhang <yifan1.zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 7edb503fe4b6d67f47d8bb0dfafb8e699bb0f8a4)
2026-01-07 17:24:10 -05:00
Dave Airlie
a5f207e272 drm-misc-fixes for v6.19-rc5:

Merge tag 'drm-misc-fixes-2026-01-07' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

drm-misc-fixes for v6.19-rc5:

pl111:
- Fix error handling in probe

mediatek/atomic/tidss:
- Fix tidss in another way and revert reordering of pre-enable and post-disable operations,
  as it breaks other bridge drivers.

nouveau:
- Fix regression from fwsec s/r fix.

pci/vga:
- Fix multiple GPUs being reported as 'boot_display'

fb-helper:
- Fix vblank timeout during suspend/reset

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patch.msgid.link/f50067e6-243a-4ed8-9781-1e4e4fdebc8e@linux.intel.com
2026-01-08 07:41:00 +10:00
Dave Airlie
50cf611b0b One code cleanup

Merge tag 'exynos-drm-fixes-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-fixes

One code cleanup
. Replace system_wq with system_percpu_wq in the Exynos HDMI driver.
  system_wq is effectively a per-cpu workqueue, but its name does not make
  this explicit. Recent workqueue changes introduced system_percpu_wq to clarify
  semantics and support ongoing workqueue API refactoring.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Inki Dae <inki.dae@samsung.com>
Link: https://patch.msgid.link/20260107091154.27436-1-inki.dae@samsung.com
2026-01-08 07:34:42 +10:00
Ilpo Järvinen
bdb32359ea sparc/PCI: Correct 64-bit non-pref -> pref BAR resources
SPARC T5-2 dts describes some PCI BARs as 64-bit resources without the
pref(etchable) bit (0x83... vs 0xc3... in assigned-addresses) for address
ranges above the 4G threshold. Such resources cannot be placed into a
non-prefetchable PCI bridge window that is capable only of 32-bit
addressing. As such, it looks like the platform is improperly described by
the dts.

The kernel detects this problem (see the IORESOURCE_PREFETCH check in
pci_find_parent_resource()) and fails to assign these BAR resources to the
resource tree due to lack of a compatible bridge window.

Prior to 754babaaf333 ("sparc/PCI: Remove pcibios_enable_device() as they
do nothing extra") SPARC arch code did not test whether device resources
were successfully in the resource tree when enabling a device, effectively
hiding the problem. After removing the arch-specific enable code,
pci_enable_resources() refuses to enable the device when it finds not all
mem resources are assigned, and therefore mpt3sas can't be enabled:

  pci 0001:04:00.0: reg 0x14: [mem 0x801110000000-0x80111000ffff 64bit]
  pci 0001:04:00.0: reg 0x1c: [mem 0x801110040000-0x80111007ffff 64bit]
  pci 0001:04:00.0: BAR 1 [mem 0x801110000000-0x80111000ffff 64bit]: can't claim; no compatible bridge window
  pci 0001:04:00.0: BAR 3 [mem 0x801110040000-0x80111007ffff 64bit]: can't claim; no compatible bridge window
  mpt3sas 0001:04:00.0: BAR 1 [mem size 0x00010000 64bit]: not assigned; can't enable device

For clarity, this filtered log only shows failures for one mpt3sas device
but other devices fail similarly. In the reported case, the end result with
all the failures is an unbootable system.

Things appeared to "work" before 754babaaf333 ("sparc/PCI: Remove
pcibios_enable_device() as they do nothing extra") because the resource
tree is agnostic to whether PCI BAR resources are properly in the tree or
not. So as long as there was a parent resource (e.g. a root bus resource)
that contains the address range, the resource tree code just places the
resource request underneath it without any consideration of the
intermediate BAR resource. While it worked, it was still an incorrect setup.

Add an OF fixup to set the IORESOURCE_PREFETCH flag for a 64-bit PCI
resource that has its end address above 4G, requiring placement into the
prefetchable window. Also log the issue.
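
A hedged sketch of such a fixup (only the flag logic is shown; the surrounding OF parsing code is elided):

```
/*
 * A 64-bit BAR ending above 4G can only be placed in the prefetchable
 * window, so patch up what the DT failed to mark, and say so.
 */
if ((res->flags & IORESOURCE_MEM_64) &&
    !(res->flags & IORESOURCE_PREFETCH) &&
    res->end > 0xffffffffULL) {
	res->flags |= IORESOURCE_PREFETCH;
	pci_info(pdev, "BAR %pR: above 4G but not marked prefetchable, fixing up\n", res);
}
```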

Fixes: 754babaaf333 ("sparc/PCI: Remove pcibios_enable_device() as they do nothing extra")
Reported-by: Nathaniel Roach <nroach44@gmail.com>
Closes: https://github.com/sparclinux/issues/issues/22
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Tested-by: Nathaniel Roach <nroach44@gmail.com>
Link: https://patch.msgid.link/20251124170411.3709-1-ilpo.jarvinen@linux.intel.com
2026-01-07 14:22:32 -06:00
Marco Crivellari
8e6ad0dac6
drm/exynos: hdmi: replace use of system_wq with system_percpu_wq
Currently if a user enqueue a work item using schedule_delayed_work() the
used wq is "system_wq" (per-cpu wq) while queue_delayed_work() use
WORK_CPU_UNBOUND (used when a cpu is not specified). The same applies to
schedule_work() that is using system_wq and queue_work(), that makes use
again of WORK_CPU_UNBOUND.

This lack of consistentcy cannot be addressed without refactoring the API.

This patch continues the effort to refactor worqueue APIs, which has begun
with the change introducing new workqueues and a new alloc_workqueue flag:

commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")

system_wq should be the per-cpu workqueue, yet in this name nothing makes
that clear, so replace system_wq with system_percpu_wq.

The old wq (system_wq) will be kept for a few release cycles.
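
The mechanical shape of the change, sketched as a diff (the work item name is illustrative):

```
-	schedule_work(&ctx->hotplug_work);                /* implicitly system_wq */
+	queue_work(system_percpu_wq, &ctx->hotplug_work); /* same behavior, explicit name */
```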

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
2026-01-07 17:50:50 +09:00
Bartosz Golaszewski
a80208072d gpio: shared: don't allocate the lookup table until we really need it
We allocate memory for the GPIO lookup table at the top of
gpio_shared_add_proxy_lookup() but we don't use it until the very end.
Depending on the timing, we may return earlier. Move the allocation
towards the end.

Fixes: a060b8c511ab ("gpiolib: implement low-level, shared GPIO support")
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20260106-gpio-shared-fixes-v2-3-c7091d2f7581@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-07 09:31:14 +01:00
Bartosz Golaszewski
476e44d06f gpio: shared: fix a race condition
When matching the reset-gpio reference with the actual firmware node
consuming the GPIO, we also need to lock the structure associated with
the latter as it can change while we're doing it.

Due to triggering lockdep false-positives, we need to use a per-reference
lockdep class but accidentally, this also allows us to remove the
previous lockdep workaround for cleaner code.

Fixes: 49416483a953 ("gpio: shared: allow sharing a reset-gpios pin between reset-gpio and gpiolib")
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Closes: https://lore.kernel.org/all/00107523-7737-4b92-a785-14ce4e93b8cb@samsung.com/
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20260106-gpio-shared-fixes-v2-2-c7091d2f7581@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-07 09:31:14 +01:00
Bartosz Golaszewski
0fe5063179 gpio: shared: assign the correct firmware node for reset-gpio use-case
When we defer probe due to unlucky timing of adding the lookup table, we
assign the matching firmware node to the shared reference for the future
probing. However, the fwnode we assign is wrong, so fix it by assigning
the one associated with the reset-gpio device.

Fixes: 49416483a953 ("gpio: shared: allow sharing a reset-gpios pin between reset-gpio and gpiolib")
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Closes: https://lore.kernel.org/all/00107523-7737-4b92-a785-14ce4e93b8cb@samsung.com/
Tested-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20260106-gpio-shared-fixes-v2-1-c7091d2f7581@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-07 09:31:14 +01:00
Bartosz Golaszewski
20cf2aed89 gpio: rockchip: mark the GPIO controller as sleeping
The GPIO controller is configured as non-sleeping but it uses generic
pinctrl helpers which use a mutex for synchronization.

This can cause the following lockdep splat with shared GPIOs enabled on
boards which have multiple devices using the same GPIO:

BUG: sleeping function called from invalid context at
kernel/locking/mutex.c:591
in_atomic(): 1, irqs_disabled(): 1, non_block: 0, pid: 12, name:
kworker/u16:0
preempt_count: 1, expected: 0
RCU nest depth: 0, expected: 0
6 locks held by kworker/u16:0/12:
  #0: ffff0001f0018d48 ((wq_completion)events_unbound#2){+.+.}-{0:0},
at: process_one_work+0x18c/0x604
  #1: ffff8000842dbdf0 (deferred_probe_work){+.+.}-{0:0}, at:
process_one_work+0x1b4/0x604
  #2: ffff0001f18498f8 (&dev->mutex){....}-{4:4}, at:
__device_attach+0x38/0x1b0
  #3: ffff0001f75f1e90 (&gdev->srcu){.+.?}-{0:0}, at:
gpiod_direction_output_raw_commit+0x0/0x360
  #4: ffff0001f46e3db8 (&shared_desc->spinlock){....}-{3:3}, at:
gpio_shared_proxy_direction_output+0xd0/0x144 [gpio_shared_proxy]
  #5: ffff0001f180ee90 (&gdev->srcu){.+.?}-{0:0}, at:
gpiod_direction_output_raw_commit+0x0/0x360
irq event stamp: 81450
hardirqs last  enabled at (81449): [<ffff8000813acba4>]
_raw_spin_unlock_irqrestore+0x74/0x78
hardirqs last disabled at (81450): [<ffff8000813abfb8>]
_raw_spin_lock_irqsave+0x84/0x88
softirqs last  enabled at (79616): [<ffff8000811455fc>]
__alloc_skb+0x17c/0x1e8
softirqs last disabled at (79614): [<ffff8000811455fc>]
__alloc_skb+0x17c/0x1e8
CPU: 2 UID: 0 PID: 12 Comm: kworker/u16:0 Not tainted
6.19.0-rc4-next-20260105+ #11975 PREEMPT
Hardware name: Hardkernel ODROID-M1 (DT)
Workqueue: events_unbound deferred_probe_work_func
Call trace:
  show_stack+0x18/0x24 (C)
  dump_stack_lvl+0x90/0xd0
  dump_stack+0x18/0x24
  __might_resched+0x144/0x248
  __might_sleep+0x48/0x98
  __mutex_lock+0x5c/0x894
  mutex_lock_nested+0x24/0x30
  pinctrl_get_device_gpio_range+0x44/0x128
  pinctrl_gpio_direction+0x3c/0xe0
  pinctrl_gpio_direction_output+0x14/0x20
  rockchip_gpio_direction_output+0xb8/0x19c
  gpiochip_direction_output+0x38/0x94
  gpiod_direction_output_raw_commit+0x1d8/0x360
  gpiod_direction_output_nonotify+0x7c/0x230
  gpiod_direction_output+0x34/0xf8
  gpio_shared_proxy_direction_output+0xec/0x144 [gpio_shared_proxy]
  gpiochip_direction_output+0x38/0x94
  gpiod_direction_output_raw_commit+0x1d8/0x360
  gpiod_direction_output_nonotify+0x7c/0x230
  gpiod_configure_flags+0xbc/0x480
  gpiod_find_and_request+0x1a0/0x574
  gpiod_get_index+0x58/0x84
  devm_gpiod_get_index+0x20/0xb4
  devm_gpiod_get_optional+0x18/0x30
  rockchip_pcie_probe+0x98/0x380
  platform_probe+0x5c/0xac
  really_probe+0xbc/0x298
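
The fix itself is a one-liner at chip registration time, roughly (the surrounding driver structure is elided):

```
/* the chip ends up in pinctrl mutexes, so it must not claim to be
 * usable from atomic context */
bank->gpio_chip.can_sleep = true;
```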

Fixes: 936ee2675eee ("gpio/rockchip: add driver for rockchip gpio")
Cc: stable@vger.kernel.org
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Closes: https://lore.kernel.org/all/d035fc29-3b03-4cd6-b8ec-001f93540bc6@samsung.com/
Acked-by: Heiko Stuebner <heiko@sntech.de>
Link: https://lore.kernel.org/r/20260106090011.21603-1-bartosz.golaszewski@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-07 09:30:40 +01:00
Christian Brauner
75ddaa4ddc
pidfs: protect PIDFD_GET_* ioctls() via ifdef
We originally protected PIDFD_GET_<ns-type>_NAMESPACE ioctls() through
ifdefs and recent rework made it possible to drop them. There was an
oversight though: when the relevant namespace is turned off, ns->ops will
be NULL, so even though opening a file descriptor is perfectly legitimate,
it would fail during inode eviction when the file was closed.

The simple fix would be to check ns->ops for NULL and continue to allow
retrieving namespace fds from pidfds, but we don't allow retrieving them
when the relevant namespace type is turned off. So keep the
simplification but add the ifdefs back in.
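
The shape of the restored guards, sketched for one namespace type (the case body is illustrative):

```
switch (cmd) {
#ifdef CONFIG_UTS_NS
case PIDFD_GET_UTS_NAMESPACE:
	/* only reachable when the namespace type is compiled in, so
	 * ns->ops cannot be NULL later at inode eviction time */
	ns = &task->nsproxy->uts_ns->ns;
	break;
#endif
/* ... one guarded case per namespace type ... */
}
```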

Link: https://lore.kernel.org/20251222214907.GA189632@quark
Link: https://patch.msgid.link/20251224-ununterbrochen-gagen-ea949b83f8f2@brauner
Fixes: a71e4f103aed ("pidfs: simplify PIDFD_GET_<type>_NAMESPACE ioctls")
Tested-by: Brendan Jackman <jackmanb@kernel.org>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Reported-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
2026-01-06 23:08:12 +01:00
Chengjun Yao
d5bdf88d1f drm/fb-helper: Fix vblank timeout during suspend/reset
During GPU reset, VBlank interrupts are disabled which causes
drm_fb_helper_fb_dirty() to wait for VBlank timeout. This will create
call traces like (seen on an RX7900 series dGPU):

[  101.313646] ------------[ cut here ]------------
[  101.313648] amdgpu 0000:03:00.0: [drm] vblank wait timed out on crtc 0
[  101.313657] WARNING: CPU: 0 PID: 461 at drivers/gpu/drm/drm_vblank.c:1320 drm_wait_one_vblank+0x176/0x220
[  101.313663] Modules linked in: amdgpu amdxcp drm_panel_backlight_quirks gpu_sched drm_buddy drm_ttm_helper ttm drm_exec drm_suballoc_helper drm_display_helper cec rc_core i2c_algo_bit nf_conntrack_netlink xt_nat xt_tcpudp veth xt_conntrack xt_MASQUERADE bridge stp llc xfrm_user xfrm_algo xt_set ip_set nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 xt_addrtype nft_compat x_tables nf_tables overlay qrtr sunrpc snd_hda_codec_alc882 snd_hda_codec_realtek_lib snd_hda_codec_generic snd_hda_codec_atihdmi snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hda_core snd_intel_dspcfg snd_intel_sdw_acpi snd_hwdep snd_pcm amd_atl intel_rapl_msr snd_seq_midi intel_rapl_common asus_ec_sensors snd_seq_midi_event snd_rawmidi snd_seq eeepc_wmi snd_seq_device edac_mce_amd asus_wmi polyval_clmulni ghash_clmulni_intel snd_timer platform_profile aesni_intel wmi_bmof sparse_keymap joydev snd rapl input_leds i2c_piix4 soundcore ccp k10temp i2c_smbus gpio_amdpt mac_hid binfmt_misc sch_fq_codel msr parport_pc ppdev lp parport
[  101.313745]  efi_pstore nfnetlink dmi_sysfs autofs4 hid_generic usbhid hid r8169 realtek ahci libahci video wmi
[  101.313760] CPU: 0 UID: 0 PID: 461 Comm: kworker/0:2 Not tainted 6.18.0-rc6-174403b3b920 #1 PREEMPT(voluntary)
[  101.313763] Hardware name: ASUS System Product Name/TUF GAMING X670E-PLUS, BIOS 0821 11/15/2022
[  101.313765] Workqueue: events drm_fb_helper_damage_work
[  101.313769] RIP: 0010:drm_wait_one_vblank+0x176/0x220
[  101.313772] Code: 7c 24 08 4c 8b 77 50 4d 85 f6 0f 84 a1 00 00 00 e8 2f 11 03 00 44 89 e9 4c 89 f2 48 c7 c7 d0 ad 0d a8 48 89 c6 e8 2a e0 4a ff <0f> 0b e9 f2 fe ff ff 48 85 ff 74 04 4c 8b 67 08 4d 8b 6c 24 50 4d
[  101.313774] RSP: 0018:ffffc99c00d47d68 EFLAGS: 00010246
[  101.313777] RAX: 0000000000000000 RBX: 000000000200038a RCX: 0000000000000000
[  101.313778] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
[  101.313779] RBP: ffffc99c00d47dc0 R08: 0000000000000000 R09: 0000000000000000
[  101.313781] R10: 0000000000000000 R11: 0000000000000000 R12: ffff8948c4280010
[  101.313782] R13: 0000000000000000 R14: ffff894883263a50 R15: ffff89488c384830
[  101.313784] FS:  0000000000000000(0000) GS:ffff895424692000(0000) knlGS:0000000000000000
[  101.313785] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  101.313787] CR2: 00007773650ee200 CR3: 0000000588e40000 CR4: 0000000000f50ef0
[  101.313788] PKRU: 55555554
[  101.313790] Call Trace:
[  101.313791]  <TASK>
[  101.313795]  ? __pfx_autoremove_wake_function+0x10/0x10
[  101.313800]  drm_crtc_wait_one_vblank+0x17/0x30
[  101.313802]  drm_client_modeset_wait_for_vblank+0x61/0x80
[  101.313805]  drm_fb_helper_damage_work+0x46/0x1a0
[  101.313808]  process_one_work+0x1a1/0x3f0
[  101.313812]  worker_thread+0x2ba/0x3d0
[  101.313816]  kthread+0x107/0x220
[  101.313818]  ? __pfx_worker_thread+0x10/0x10
[  101.313821]  ? __pfx_kthread+0x10/0x10
[  101.313823]  ret_from_fork+0x202/0x230
[  101.313826]  ? __pfx_kthread+0x10/0x10
[  101.313828]  ret_from_fork_asm+0x1a/0x30
[  101.313834]  </TASK>
[  101.313835] ---[ end trace 0000000000000000 ]---

Cancel pending damage work synchronously before console_lock() to ensure
any in-flight framebuffer damage operations complete before suspension.

Also check for FBINFO_STATE_RUNNING in drm_fb_helper_damage_work() to
avoid executing damage work if it is rescheduled while the device is suspended.
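
Sketched, the two halves of the fix (names follow the text above):

```
/* 1) in the suspend path, before taking console_lock(): */
cancel_work_sync(&fb_helper->damage_work);

/* 2) at the top of drm_fb_helper_damage_work(): */
if (fb_helper->info->state != FBINFO_STATE_RUNNING)
	return; /* device suspended; drop rescheduled damage work */
```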

Fixes: d8c4bddcd8bc ("drm/fb-helper: Synchronize dirty worker with vblank")
Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Chengjun Yao <Chengjun.Yao@amd.com>
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patch.msgid.link/20251215081822.432005-1-Chengjun.Yao@amd.com
2026-01-06 09:05:06 +01:00
Mario Limonciello (AMD)
fd390ff144 PCI/VGA: Don't assume the only VGA device on a system is boot_vga
Some systems ship with multiple display class devices, but not all of
them are VGA devices. If the "only" VGA device on the system is not used
for displaying the image on the screen, marking it as `boot_vga` just
because nothing else was found is totally wrong.

This behavior leads to the wrong device being advertised to userspace,
which can then make incorrect decisions.

As there is an accurate `boot_display` sysfs file, stop lying about
`boot_vga` by assuming that, if nothing else is found, it must be the
right device.

Reported-by: Aaron Erhardt <aer@tuxedocomputers.com>
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220712
Tested-by: Aaron Erhardt <aer@tuxedocomputers.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Fixes: ad90860bd10ee ("fbcon: Use screen info to find primary device")
Tested-by: Luke D. Jones <luke@ljones.dev>
Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patch.msgid.link/20260106044638.52906-1-superm1@kernel.org
2026-01-06 08:15:26 +01:00
Mark Harmstone
2bb83bc42b btrfs: show correct warning if can't read data reloc tree
If a filesystem is missing its data reloc tree, we get something like
this in dmesg:

  BTRFS warning (device loop11): failed to read root (objectid=4): -2
  BTRFS error (device loop11): open_ctree failed: -2

The objectid shown is BTRFS_DEV_TREE_OBJECTID, but it should actually be
the value of BTRFS_DATA_RELOC_TREE_OBJECTID.

btrfs_read_roots() prints location.objectid on failure, but this isn't
set when reading the data reloc tree. Set location.objectid to the
correct value on failure, so that the error message makes sense.
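
A sketch of the fix, with the error path simplified:

```
if (ret) {
	/* btrfs_read_roots() prints location.objectid in its warning,
	 * so make it name the tree that actually failed */
	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	goto out;
}
```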

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Mark Harmstone <mark@harmstone.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2026-01-06 01:23:00 +01:00
Suchit Karunakaran
530e3d4af5 btrfs: fix NULL pointer dereference in do_abort_log_replay()
Coverity reported a NULL pointer dereference issue (CID 1666756) in
do_abort_log_replay(). When btrfs_alloc_path() fails in
replay_one_buffer(), wc->subvol_path is NULL, but btrfs_abort_log_replay()
calls do_abort_log_replay() which unconditionally dereferences
wc->subvol_path when attempting to print debug information. Fix this by
adding a NULL check before dereferencing wc->subvol_path in
do_abort_log_replay().
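
A sketch of the guard (the dumped details are illustrative):

```
/* wc->subvol_path is NULL if btrfs_alloc_path() failed earlier, so
 * only dump the path-dependent state when it actually exists */
if (wc->subvol_path) {
	btrfs_print_leaf(wc->subvol_path->nodes[0]);
	btrfs_release_path(wc->subvol_path);
}
```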

Fixes: 2753e4917624 ("btrfs: dump detailed info and specific messages on log replay failures")
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Suchit Karunakaran <suchitkarunakaran@gmail.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2026-01-06 01:23:00 +01:00
Qu Wenruo
cefd809251 btrfs: force free space tree for bs > ps cases
[BUG]
Currently we only enforce the free space tree for bs < ps cases, but
with the recently added bs > ps support, we lack that enforcement,
causing an explicit v1 cache mount option to fail on bs > ps cases:

  # mount -o space_cache=v1 /dev/test/scratch1  /mnt/btrfs/
  mount: /mnt/btrfs: wrong fs type, bad option, bad superblock on /dev/mapper/test-scratch1, missing codepage or helper program, or other error.
         dmesg(1) may have more information after failed mount system call.

  # dmesg -t | tail -n7
  BTRFS: device fsid ac14a6fa-4ec9-449e-aec9-7d1777bfdc06 devid 1 transid 11 /dev/mapper/test-scratch1 (253:3) scanned by mount (2849)
  BTRFS info (device dm-3): first mount of filesystem ac14a6fa-4ec9-449e-aec9-7d1777bfdc06
  BTRFS info (device dm-3): using crc32c checksum algorithm
  BTRFS warning (device dm-3): support for block size 8192 with page size 4096 is experimental, some features may be missing
  BTRFS warning (device dm-3): space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2
  BTRFS warning (device dm-3): v1 space cache is not supported for page size 4096 with sectorsize 8192
  BTRFS error (device dm-3): open_ctree failed: -22

[FIX]
Just enforce the free space tree for bs > ps cases as well, aligning the
behavior with bs < ps cases.
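
Conceptually the fix widens the existing subpage check, roughly (condition illustrative):

```
/* was: if (fs_info->sectorsize < PAGE_SIZE) */
if (fs_info->sectorsize != PAGE_SIZE) /* covers both bs < ps and bs > ps */
	btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
```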

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2026-01-06 01:22:59 +01:00
Qu Wenruo
30bcf4e824 btrfs: only enforce free space tree if v1 cache is required for bs < ps cases
[BUG]
Since the introduction of btrfs bs < ps support, the v1 cache was never
on the plan due to its hard-coded PAGE_SIZE usage and the future plan to
properly deprecate it.

However, for bs < ps cases, even if the 'nospace_cache,clear_cache' mount
options are specified, they are never respected and the free space tree
is always enabled:

  mkfs.btrfs -f -O ^bgt,fst $dev
  mount $dev $mnt -o clear_cache,nospace_cache
  umount $mnt
  btrfs ins dump-super $dev
  ...
  compat_ro_flags		0x3
         		( FREE_SPACE_TREE |
         		  FREE_SPACE_TREE_VALID )
  ...

This means a different behavior compared to bs >= ps cases.

[CAUSE]
The forced usage of the v2 space cache is done inside
btrfs_set_free_space_cache_settings(); however, it never checks whether
we're even using a space cache and always enables the v2 cache.

[FIX]
Instead of unconditionally enabling the v2 cache, only force it if the
old v1 cache is required.

Now v2 space cache can be properly disabled on bs < ps cases:

  mkfs.btrfs -f -O ^bgt,fst $dev
  mount $dev $mnt -o clear_cache,nospace_cache
  umount $mnt
  btrfs ins dump-super $dev
  ...
  compat_ro_flags		0x0
  ...
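
A sketch of the corrected logic in btrfs_set_free_space_cache_settings() (simplified):

```
/* only force the v2 tree when the incompatible v1 cache was requested;
 * otherwise respect nospace_cache and leave the options alone */
if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
	btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
	btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
}
```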

Fixes: 9f73f1aef98b ("btrfs: force v2 space cache usage for subpage mount")
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2026-01-06 01:22:59 +01:00
Filipe Manana
8731f2c50b btrfs: release path before initializing extent tree in btrfs_read_locked_inode()
In btrfs_read_locked_inode() we are calling btrfs_init_file_extent_tree()
while holding a path with a read locked leaf from a subvolume tree, and
btrfs_init_file_extent_tree() may do a GFP_KERNEL allocation, which can
trigger reclaim.

This can create a circular lock dependency which lockdep warns about with
the following splat:

   [6.1433] ======================================================
   [6.1574] WARNING: possible circular locking dependency detected
   [6.1583] 6.18.0+ #4 Tainted: G     U
   [6.1591] ------------------------------------------------------
   [6.1599] kswapd0/117 is trying to acquire lock:
   [6.1606] ffff8d9b6333c5b8 (&delayed_node->mutex){+.+.}-{3:3}, at: __btrfs_release_delayed_node.part.0+0x39/0x2f0
   [6.1625]
            but task is already holding lock:
   [6.1633] ffffffffa4ab8ce0 (fs_reclaim){+.+.}-{0:0}, at: balance_pgdat+0x195/0xc60
   [6.1646]
            which lock already depends on the new lock.

   [6.1657]
            the existing dependency chain (in reverse order) is:
   [6.1667]
            -> #2 (fs_reclaim){+.+.}-{0:0}:
   [6.1677]        fs_reclaim_acquire+0x9d/0xd0
   [6.1685]        __kmalloc_cache_noprof+0x59/0x750
   [6.1694]        btrfs_init_file_extent_tree+0x90/0x100
   [6.1702]        btrfs_read_locked_inode+0xc3/0x6b0
   [6.1710]        btrfs_iget+0xbb/0xf0
   [6.1716]        btrfs_lookup_dentry+0x3c5/0x8e0
   [6.1724]        btrfs_lookup+0x12/0x30
   [6.1731]        lookup_open.isra.0+0x1aa/0x6a0
   [6.1739]        path_openat+0x5f7/0xc60
   [6.1746]        do_filp_open+0xd6/0x180
   [6.1753]        do_sys_openat2+0x8b/0xe0
   [6.1760]        __x64_sys_openat+0x54/0xa0
   [6.1768]        do_syscall_64+0x97/0x3e0
   [6.1776]        entry_SYSCALL_64_after_hwframe+0x76/0x7e
   [6.1784]
            -> #1 (btrfs-tree-00){++++}-{3:3}:
   [6.1794]        lock_release+0x127/0x2a0
   [6.1801]        up_read+0x1b/0x30
   [6.1808]        btrfs_search_slot+0x8e0/0xff0
   [6.1817]        btrfs_lookup_inode+0x52/0xd0
   [6.1825]        __btrfs_update_delayed_inode+0x73/0x520
   [6.1833]        btrfs_commit_inode_delayed_inode+0x11a/0x120
   [6.1842]        btrfs_log_inode+0x608/0x1aa0
   [6.1849]        btrfs_log_inode_parent+0x249/0xf80
   [6.1857]        btrfs_log_dentry_safe+0x3e/0x60
   [6.1865]        btrfs_sync_file+0x431/0x690
   [6.1872]        do_fsync+0x39/0x80
   [6.1879]        __x64_sys_fsync+0x13/0x20
   [6.1887]        do_syscall_64+0x97/0x3e0
   [6.1894]        entry_SYSCALL_64_after_hwframe+0x76/0x7e
   [6.1903]
            -> #0 (&delayed_node->mutex){+.+.}-{3:3}:
   [6.1913]        __lock_acquire+0x15e9/0x2820
   [6.1920]        lock_acquire+0xc9/0x2d0
   [6.1927]        __mutex_lock+0xcc/0x10a0
   [6.1934]        __btrfs_release_delayed_node.part.0+0x39/0x2f0
   [6.1944]        btrfs_evict_inode+0x20b/0x4b0
   [6.1952]        evict+0x15a/0x2f0
   [6.1958]        prune_icache_sb+0x91/0xd0
   [6.1966]        super_cache_scan+0x150/0x1d0
   [6.1974]        do_shrink_slab+0x155/0x6f0
   [6.1981]        shrink_slab+0x48e/0x890
   [6.1988]        shrink_one+0x11a/0x1f0
   [6.1995]        shrink_node+0xbfd/0x1320
   [6.1002]        balance_pgdat+0x67f/0xc60
   [6.1321]        kswapd+0x1dc/0x3e0
   [6.1643]        kthread+0xff/0x240
   [6.1965]        ret_from_fork+0x223/0x280
   [6.1287]        ret_from_fork_asm+0x1a/0x30
   [6.1616]
            other info that might help us debug this:

   [6.1561] Chain exists of:
              &delayed_node->mutex --> btrfs-tree-00 --> fs_reclaim

   [6.1503]  Possible unsafe locking scenario:

   [6.1110]        CPU0                    CPU1
   [6.1411]        ----                    ----
   [6.1707]   lock(fs_reclaim);
   [6.1998]                                lock(btrfs-tree-00);
   [6.1291]                                lock(fs_reclaim);
   [6.1581]   lock(&delayed_node->mutex);
   [6.1874]
             *** DEADLOCK ***

   [6.1716] 2 locks held by kswapd0/117:
   [6.1999]  #0: ffffffffa4ab8ce0 (fs_reclaim){+.+.}-{0:0}, at: balance_pgdat+0x195/0xc60
   [6.1294]  #1: ffff8d998344b0e0 (&type->s_umount_key#40){++++}- {3:3}, at: super_cache_scan+0x37/0x1d0
   [6.1596]
            stack backtrace:
   [6.1183] CPU: 11 UID: 0 PID: 117 Comm: kswapd0 Tainted: G     U 6.18.0+ #4 PREEMPT(lazy)
   [6.1185] Tainted: [U]=USER
   [6.1186] Hardware name: ASUS System Product Name/PRIME B560M-A AC, BIOS 2001 02/01/2023
   [6.1187] Call Trace:
   [6.1187]  <TASK>
   [6.1189]  dump_stack_lvl+0x6e/0xa0
   [6.1192]  print_circular_bug.cold+0x17a/0x1c0
   [6.1194]  check_noncircular+0x175/0x190
   [6.1197]  __lock_acquire+0x15e9/0x2820
   [6.1200]  lock_acquire+0xc9/0x2d0
   [6.1201]  ? __btrfs_release_delayed_node.part.0+0x39/0x2f0
   [6.1204]  __mutex_lock+0xcc/0x10a0
   [6.1206]  ? __btrfs_release_delayed_node.part.0+0x39/0x2f0
   [6.1208]  ? __btrfs_release_delayed_node.part.0+0x39/0x2f0
   [6.1211]  ? __btrfs_release_delayed_node.part.0+0x39/0x2f0
   [6.1213]  __btrfs_release_delayed_node.part.0+0x39/0x2f0
   [6.1215]  btrfs_evict_inode+0x20b/0x4b0
   [6.1217]  ? lock_acquire+0xc9/0x2d0
   [6.1220]  evict+0x15a/0x2f0
   [6.1222]  prune_icache_sb+0x91/0xd0
   [6.1224]  super_cache_scan+0x150/0x1d0
   [6.1226]  do_shrink_slab+0x155/0x6f0
   [6.1228]  shrink_slab+0x48e/0x890
   [6.1229]  ? shrink_slab+0x2d2/0x890
   [6.1231]  shrink_one+0x11a/0x1f0
   [6.1234]  shrink_node+0xbfd/0x1320
   [6.1236]  ? shrink_node+0xa2d/0x1320
   [6.1236]  ? shrink_node+0xbd3/0x1320
   [6.1239]  ? balance_pgdat+0x67f/0xc60
   [6.1239]  balance_pgdat+0x67f/0xc60
   [6.1241]  ? finish_task_switch.isra.0+0xc4/0x2a0
   [6.1246]  kswapd+0x1dc/0x3e0
   [6.1247]  ? __pfx_autoremove_wake_function+0x10/0x10
   [6.1249]  ? __pfx_kswapd+0x10/0x10
   [6.1250]  kthread+0xff/0x240
   [6.1251]  ? __pfx_kthread+0x10/0x10
   [6.1253]  ret_from_fork+0x223/0x280
   [6.1255]  ? __pfx_kthread+0x10/0x10
   [6.1257]  ret_from_fork_asm+0x1a/0x30
   [6.1260]  </TASK>

This is because:

1) The fsync task is holding an inode's delayed node mutex (for a
   directory) while calling __btrfs_update_delayed_inode() and that needs
   to do a search on the subvolume's btree (therefore read lock some
   extent buffers);

2) The lookup task, at btrfs_lookup(), triggered reclaim with the
   GFP_KERNEL allocation done by btrfs_init_file_extent_tree() while
   holding a read lock on a subvolume leaf;

3) The reclaim triggered kswapd which is doing inode eviction for the
   directory inode the fsync task is using as an argument to
   btrfs_commit_inode_delayed_inode() - but in that call chain we are
   trying to read lock the same leaf that the lookup task is holding
   while calling btrfs_init_file_extent_tree() and doing the GFP_KERNEL
   allocation.

Fix this by calling btrfs_init_file_extent_tree() in
btrfs_read_locked_inode() only after we no longer need the path and
have released it.
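
A minimal sketch of the resulting order, with stub types standing in
for the real btrfs structures (illustrative only, not the actual
patch):

    struct btrfs_path { int slot; };
    struct btrfs_inode { int mode; };

    static void btrfs_release_path(struct btrfs_path *path) { path->slot = 0; }

    /* does a GFP_KERNEL allocation, so it may enter reclaim */
    static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
    {
            return inode ? 0 : -1;
    }

    static int read_locked_inode_tail(struct btrfs_path *path,
                                      struct btrfs_inode *inode)
    {
            /* drop the path first so no extent buffer locks are held... */
            btrfs_release_path(path);
            /* ...then the allocation can safely recurse into reclaim */
            return btrfs_init_file_extent_tree(inode);
    }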

Reported-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://lore.kernel.org/linux-btrfs/6e55113a22347c3925458a5d840a18401a38b276.camel@linux.intel.com/
Fixes: 8679d2687c35 ("btrfs: initialize inode::file_extent_tree after i_mode has been set")
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2026-01-06 01:22:59 +01:00
Qu Wenruo
1aff297ffb btrfs: avoid access-beyond-folio for bs > ps encoded writes
[POTENTIAL BUG]
If the system page size is 4K, the fs block size is 8K, and the
max_inline mount option is set to 6K, we can inline a 6K sized data
extent.

Then an encoded write submits a compressed extent at file offset 0 with
a compressed length of 6K, which is allowed to be inlined.

This triggers a read beyond the page boundary inside
write_extent_buffer(), called from insert_inline_extent().

[CAUSE]
Currently the function __cow_file_range_inline() can only accept a
single folio.

For the regular compressed write path we always allocate the compressed
folios using the minimal order matching the block size, so
@compressed_folio always covers a full fs block and everything is fine.

But encoded writes allocate page sized folios, which means we can hit a
case where the compressed data is smaller than the block size but still
larger than the page size. In that case __cow_file_range_inline() will
be called with @compressed_size larger than a page, and we will trigger
a read beyond the folio inside insert_inline_extent().

Thankfully this is not that common, as the default max_inline is only
2048 bytes, smaller than PAGE_SIZE, and bs > ps support is still
experimental.

[FIX]
We need to either allow insert_inline_extent() to accept a page array
to properly support such a case, or reject such inline extents.

The latter is a much simpler solution, and considering that bs > ps
will remain a corner case and a non-default max_inline will be even
rarer, I don't think we really need to support such a niche.

So just reject any inline extent that's larger than PAGE_SIZE, and add
an extra ASSERT() to insert_inline_extent() to catch any such
beyond-boundary access.
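
A minimal sketch of the rejection check, using made-up names (not the
actual btrfs code):

    /*
     * bs > ps setups can produce compressed data that satisfies the
     * inline size limit yet spans more than one page-sized folio;
     * insert_inline_extent() only handles a single folio, so reject
     * anything larger than a page.
     */
    static int can_inline_compressed(unsigned long compressed_size,
                                     unsigned long page_size)
    {
            return compressed_size <= page_size;
    }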

Fixes: ec20799064c8 ("btrfs: enable encoded read/write/send for bs > ps cases")
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
2026-01-06 01:22:59 +01:00
Alan Liu
72d7f45736 drm/amdgpu: Fix query for VPE block_type and ip_count
[Why]
Query for VPE block_type and ip_count is missing.

[How]
Add VPE case in ip_block_type and hw_ip_count query.

Reviewed-by: Lang Yu <lang.yu@amd.com>
Signed-off-by: Alan Liu <haoping.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit a6ea0a430aca5932b9c75d8e38deeb45665dd2ae)
Cc: stable@vger.kernel.org
2026-01-05 17:34:48 -05:00
Timur Kristóf
e0d20a7658 drm/amd/display: Add missing encoder setup to DACnEncoderControl
Apparently the DAC encoder needs to be set up before use.
The BIOS parser in DC did not support this so I assumed it was
not necessary, but the DAC doesn't work without it on some GPUs.

Fixes: 69b29b894660 ("drm/amd/display: Hook up DAC to bios_parser_encoder_control")
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit bb5dfe2f5630ce344c654c705d28b4e20cb9d334)
2026-01-05 17:33:28 -05:00
Timur Kristóf
fd40c146c8 drm/amd/display: Correct color depth for SelectCRTC_Source
Pass the correct enum values as expected by the VBIOS.
Previously the actual bit depth integer value was passed,
which was a mistake.

Fixes: 7fb4f254c8eb ("drm/amd/display: Add SelectCRTC_Source to BIOS parser")
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit cdf6e4c0cdab129ffc4e41a8ac53a0738f805072)
2026-01-05 17:31:24 -05:00
Pratap Nirujogi
7ed51e3a13 drm/amd/amdgpu: Fix SMU warning during isp suspend-resume
ISP MFD child devices use genpd, and the system suspend-resume
operations are not in sync between genpd and the amdgpu parent device,
which uses only runtime suspend-resume.

During system suspend-resume, the Linux power manager resumes the genpd
devices earlier than the amdgpu parent device. This results in the
warning below, as the SMU is still in the suspended state when genpd
attempts to resume the ISP.

WARNING: CPU: 13 PID: 5435 at drivers/gpu/drm/amd/amdgpu/../pm/swsmu/amdgpu_smu.c:398 smu_dpm_set_power_gate+0x36f/0x380 [amdgpu]

To fix this warning, ISP suspend-resume is handled as part of the
amdgpu parent device's suspend-resume instead of the genpd sequence.
Each ISP MFD child device is marked as a dev_pm_syscore_device to skip
genpd suspend-resume, and the pm_runtime_force APIs are used to suspend
and resume the devices when the callbacks from amdgpu are received.
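
A rough sketch of the mechanism, with illustrative function names
around the real dev_pm_syscore_device() and pm_runtime_force_*() APIs:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static void isp_child_setup(struct device *child)
    {
            /* opt the MFD child out of the genpd system sleep sequence */
            dev_pm_syscore_device(child, true);
    }

    /* called from the amdgpu parent's own suspend/resume callbacks */
    static int isp_child_suspend(struct device *child)
    {
            return pm_runtime_force_suspend(child);
    }

    static int isp_child_resume(struct device *child)
    {
            return pm_runtime_force_resume(child);
    }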

Co-developed-by: Gjorgji Rosikopulos <grosikop@amd.com>
Signed-off-by: Gjorgji Rosikopulos <grosikop@amd.com>
Signed-off-by: Bin Du <bin.du@amd.com>
Signed-off-by: Pratap Nirujogi <pratap.nirujogi@amd.com>
Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 0288a345f19b2162546352161509bb24614729e1)
2026-01-05 17:29:51 -05:00
Alex Deucher
531b432609 drm/amdgpu: always backup and reemit fences
When we back up the ring contents for reemit before a
ring reset, we skip jobs associated with the bad
context; however, we need to make sure the fences
are reemitted, as unprocessed submissions may depend on
them.

v2: clean up fence handling, make helpers static

Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 155a748f14bc0b72783994dea7c5a12276730342)
2026-01-05 17:28:45 -05:00
Alex Deucher
9fc27cbabe drm/amdgpu: don't reemit ring contents more than once
If we cancel a bad job and reemit the ring contents, and
we get another timeout, cancel everything rather than reemitting.
The wptr markers are only relevant for the original emit.  If
we reemit, the wptr markers are no longer correct.

Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit fb62a2067ca4555a6572d911e05919a311c010aa)
2026-01-05 17:28:45 -05:00
Yang Wang
dc8a887de1 drm/amd/pm: force send pcie parmater on navi1x
v1:
the PMFW doesn't initialize the PCIe DPM parameters
and requires the KMD to actively provide these parameters.

v2:
clean & remove unused code logic (lijo)

Fixes: 1a18607c07bb ("drm/amd/pm: override pcie dpm parameters only if it is necessary")
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4671
Signed-off-by: Yang Wang <kevinyang.wang@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit b0dbd5db7cf1f81e4aaedd25cb5e72ce369387b2)
2026-01-05 17:28:45 -05:00
Yang Wang
4f74c2dd97 drm/amd/pm: fix wrong pcie parameter on navi1x
fix wrong pcie dpm parameter on navi1x

Fixes: 1a18607c07bb ("drm/amd/pm: override pcie dpm parameters only if it is necessary")
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4671
Signed-off-by: Yang Wang <kevinyang.wang@amd.com>
Co-developed-by: Kenneth Feng <kenneth.feng@amd.com>
Signed-off-by: Kenneth Feng <kenneth.feng@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 5c5189cf4b0cc0a22bac74a40743ee711cff07f8)
2026-01-05 17:24:46 -05:00
Alex Deucher
19158c7332 drm/radeon: Remove __counted_by from ClockInfoArray.clockInfo[]
clockInfo[] is a generic uchar pointer to variable-sized structures
which vary from ASIC to ASIC.

Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4374
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit dc135aa73561b5acc74eadf776e48530996529a3)
Cc: stable@vger.kernel.org
2026-01-05 17:23:41 -05:00
Nathan Chancellor
6ce6fbfddc drm/amd/display: Reduce number of arguments of dcn30's CalculateWatermarksAndDRAMSpeedChangeSupport()
CalculateWatermarksAndDRAMSpeedChangeSupport() has a large number of
parameters, which must be passed on the stack. Most of the parameters
between the two callsites are the same, so they can be accessed through
the existing mode_lib pointer, instead of being passed as explicit
arguments. Doing this reduces the stack size of
dml30_ModeSupportAndSystemConfigurationFull() from 1912 bytes to 1840
bytes building for x86_64 with clang-22, helping stay under the 2048
byte limit for display_mode_vba_30.c.

Additionally, now that there is a pointer to mode_lib->vba available,
use 'v' consistently throughout the entire function.
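
The pattern, sketched with generic names (not the actual dcn30 code):

    struct display_mode_lib {
            int dram_speed;
            int watermark_us;
            /* ...many more fields... */
    };

    /* before: every value is pushed on the stack at each call site */
    int calc_old(int dram_speed, int watermark_us /* , ... */);

    /* after: one pointer replaces the long argument list */
    static int calc_new(const struct display_mode_lib *v)
    {
            return v->dram_speed + v->watermark_us;
    }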

Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 563dfbefdf633c8d958398ddfa3955f9f40e47d9)
2026-01-05 17:22:53 -05:00
Nathan Chancellor
f54a91f533 drm/amd/display: Reduce number of arguments of dcn30's CalculatePrefetchSchedule()
After an innocuous optimization change in clang-22,
dml30_ModeSupportAndSystemConfigurationFull() is over the 2048 byte
stack limit for display_mode_vba_30.c.

  drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn30/display_mode_vba_30.c:3529:6: warning: stack frame size (2096) exceeds limit (2048) in 'dml30_ModeSupportAndSystemConfigurationFull' [-Wframe-larger-than]
   3529 | void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
        |      ^

With clang-21, this function was already close to the limit:

  drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn30/display_mode_vba_30.c:3529:6: warning: stack frame size (1912) exceeds limit (1586) in 'dml30_ModeSupportAndSystemConfigurationFull' [-Wframe-larger-than]
   3529 | void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
        |      ^

CalculatePrefetchSchedule() has a large number of parameters, which must
be passed on the stack. Most of the parameters between the two callsites
are the same, so they can be accessed through the existing mode_lib
pointer, instead of being passed as explicit arguments. Doing this
reduces the stack size of dml30_ModeSupportAndSystemConfigurationFull()
from 2096 bytes to 1912 bytes with clang-22.

Closes: https://github.com/ClangBuiltLinux/linux/issues/2117
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit b20b3fc4210f83089f835cdb91deec4b0778761a)
2026-01-05 17:22:47 -05:00
Nathan Chancellor
7074045437 drm/amd/display: Apply e4479aecf658 to dml
After an innocuous optimization change in clang-22, allmodconfig (which
enables CONFIG_KASAN and CONFIG_WERROR) breaks with:

  drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/display_mode_vba_32.c:1724:6: error: stack frame size (3144) exceeds limit (3072) in 'dml32_ModeSupportAndSystemConfigurationFull' [-Werror,-Wframe-larger-than]
   1724 | void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
        |      ^

With clang-21, this function was already pretty close to the existing
limit of 3072 bytes.

  drivers/gpu/drm/amd/amdgpu/../display/dc/dml/dcn32/display_mode_vba_32.c:1724:6: error: stack frame size (2904) exceeds limit (2048) in 'dml32_ModeSupportAndSystemConfigurationFull' [-Werror,-Wframe-larger-than]
   1724 | void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
        |      ^

A similar situation occurred in dml2, which was resolved by
commit e4479aecf658 ("drm/amd/display: Increase sanitizer frame larger
than limit when compile testing with clang") by increasing the limit for
clang when compile-testing with certain sanitizers enabled, so that
allmodconfig (an easy testing target) continues to work.

Apply that same change to the dml folder to clear up the warning for
allmodconfig, unbreaking the build.

Closes: https://github.com/ClangBuiltLinux/linux/issues/2135
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 25314b453cf812150e9951a32007a32bba85707e)
Cc: stable@vger.kernel.org
2026-01-05 17:22:22 -05:00
Malaya Kumar Rout
7966cf0ebe PM: hibernate: Fix crash when freeing invalid crypto compressor
When crypto_alloc_acomp() fails, it returns an ERR_PTR value, not NULL.

The cleanup code in save_compressed_image() and load_compressed_image()
unconditionally calls crypto_free_acomp() without checking for ERR_PTR,
which causes crypto_acomp_tfm() to dereference an invalid pointer and
crash the kernel.

This can be triggered when the compression algorithm is unavailable
(e.g., CONFIG_CRYPTO_LZO not enabled).

Fix by adding IS_ERR_OR_NULL() checks before calling crypto_free_acomp()
and acomp_request_free(), similar to the existing kthread_stop() check.
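
A minimal sketch of the guarded cleanup (simplified from the actual
hibernate code paths):

    #include <linux/err.h>
    #include <crypto/acompress.h>

    static void free_compressor(struct crypto_acomp *tfm,
                                struct acomp_req *req)
    {
            /* crypto_alloc_acomp() returns ERR_PTR() on failure, not NULL */
            if (!IS_ERR_OR_NULL(req))
                    acomp_request_free(req);
            if (!IS_ERR_OR_NULL(tfm))
                    crypto_free_acomp(tfm);
    }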

Fixes: b03d542c3c95 ("PM: hibernate: Use crypto_acomp interface")
Signed-off-by: Malaya Kumar Rout <mrout@redhat.com>
Cc: 6.15+ <stable@vger.kernel.org> # 6.15+
[ rjw: Added 2 empty code lines ]
Link: https://patch.msgid.link/20251230115613.64080-1-mrout@redhat.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2026-01-05 19:12:56 +01:00
Lorenzo Pieralisi
1ca8677d9f ACPI: PCI: IRQ: Fix INTx GSIs signedness
In ACPI, Global System Interrupts (GSIs) are described using a 32-bit
value.

The ACPI/PCI legacy interrupt (INTx) parsing code treats GSIs as
'int', which poses issues if the GSI value is a 32-bit value with the
MSB set (as required in some interrupt configurations, e.g. ARM64 GICv5
systems), because acpi_pci_link_allocate_irq() treats a negative gsi
return value as a failed GSI allocation (and acpi_irq_get_penalty()
would trigger an out-of-bounds array dereference if its 'irq' parameter
is negative).

Fix ACPI/PCI legacy INTx parsing by converting the variables
representing GSIs from 'int' to 'u32', bringing the code in line with
the ACPI specification and fixing the current parsing issue.
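
The hazard can be seen in plain C (an illustration, not the kernel
code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t gsi = 0x80000010u; /* valid GSI with the MSB set */
            int as_int = (int)gsi;      /* negative on two's-complement targets */

            if (as_int < 0)
                    printf("GSI 0x%x misread as an error (%d)\n",
                           (unsigned int)gsi, as_int);
            return 0;
    }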

Signed-off-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
Reviewed-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://patch.msgid.link/20260105101705.36703-1-lpieralisi@kernel.org
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2026-01-05 19:06:40 +01:00
Bjorn Helgaas
df27c03b9e PCI: meson: Report that link is up while in ASPM L0s and L1 states
Previously meson_pcie_link_up() only returned true if the link was in the
L0 state.  This was incorrect because hardware autonomously manages
transitions between L0, L0s, and L1 while both components on the link stay
in D0.  Those states should all be treated as "link is active".

Returning false when the device was in L0s or L1 broke config accesses
because dw_pcie_other_conf_map_bus() fails if the link is down, which
caused errors like this:

  meson-pcie fc000000.pcie: error: wait linkup timeout
  pci 0000:01:00.0: BAR 0: error updating (0xfc700004 != 0xffffffff)

Remove the LTSSM state check, timeout, speed check, and error message from
meson_pcie_link_up(), the dw_pcie_ops.link_up() method, so it is a simple
boolean check of whether the link is active.  Timeouts and error messages
are handled at a higher level, e.g., dw_pcie_wait_for_link().
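
What the simplified method reduces to, sketched with a hypothetical
status bit (not the actual meson register layout):

    #define LINK_ACTIVE_BIT 0x1 /* hypothetical "link active" bit */

    static bool sketch_link_up(unsigned int status_reg)
    {
            /* L0, L0s and L1 all count as "link active" */
            return status_reg & LINK_ACTIVE_BIT;
    }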

Fixes: 9c0ef6d34fdb ("PCI: amlogic: Add the Amlogic Meson PCIe controller driver")
Reported-by: Linnaea Lavia <linnaea-von-lavia@live.com>
Closes: https://lore.kernel.org/r/DM4PR05MB102707B8CDF84D776C39F22F2C7F0A@DM4PR05MB10270.namprd05.prod.outlook.com
[bhelgaas: squash removal of unused WAIT_LINKUP_TIMEOUT by
Martin Blumenstingl <martin.blumenstingl@googlemail.com>:
https://patch.msgid.link/20260105125625.239497-1-martin.blumenstingl@googlemail.com]
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Tested-by: Linnaea Lavia <linnaea-von-lavia@live.com>
Tested-by: Neil Armstrong <neil.armstrong@linaro.org> # on BananaPi M2S
Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
Cc: stable@vger.kernel.org
Link: https://patch.msgid.link/20251103221930.1831376-1-helgaas@kernel.org
Link: https://patch.msgid.link/20260105125625.239497-1-martin.blumenstingl@googlemail.com
2026-01-05 11:20:29 -06:00
Dave Airlie
c4f2ae5386 DRM Rust fixes for v6.19-rc4
MAINTAINERS:
   - Fix Nova GPU driver git links.
   - Fix typo in TYR driver entry preventing correct behavior of
     scripts/get_maintainer.pl.
   - Exclude TYR driver from DRM MISC.
 
 Nova Core:
   - Correctly select RUST_FW_LOADER_ABSTRACTIONS to prevent build
     errors.
   - Regenerate nova-core bindgen bindings with '--explicit-padding' to
     avoid uninitialized bytes.
   - Fix length of received GSP messages, due to miscalculated message
     payload size.
   - Regenerate bindings to derive MaybeZeroable.
   - Use a bindings alias to derive the firmware version.
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQS2q/xV6QjXAdC7k+1FlHeO1qrKLgUCaVKmRwAKCRBFlHeO1qrK
 LoMTAQDYwgJKxyoKkxY6cRXdWkxDNCueEhu4JUuaCZ4x3DAdZQD6AzrPdaQ08cTn
 FUpyXS4aM7AtbOSHzM6wMm7kH6pGqgc=
 =us3l
 -----END PGP SIGNATURE-----

Merge tag 'drm-rust-fixes-2025-12-29' of https://gitlab.freedesktop.org/drm/rust/kernel into drm-fixes

DRM Rust fixes for v6.19-rc4

MAINTAINERS:
  - Fix Nova GPU driver git links.
  - Fix typo in TYR driver entry preventing correct behavior of
    scripts/get_maintainer.pl.
  - Exclude TYR driver from DRM MISC.

Nova Core:
  - Correctly select RUST_FW_LOADER_ABSTRACTIONS to prevent build
    errors.
  - Regenerate nova-core bindgen bindings with '--explicit-padding' to
    avoid uninitialized bytes.
  - Fix length of received GSP messages, due to miscalculated message
    payload size.
  - Regenerate bindings to derive MaybeZeroable.
  - Use a bindings alias to derive the firmware version.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: "Danilo Krummrich" <dakr@kernel.org>
Link: https://patch.msgid.link/DFATYVSQRQ4W.1R030NZ34XUZK@kernel.org
2026-01-05 14:45:33 +10:00
Dave Airlie
e8b3627bec nouveau: don't attempt fwsec on sb on newer platforms.
The change to always load fwsec-sb causes problems on newer GPUs,
which don't use this path.

Add hooks and pass them through the device-specific layers.

Fixes: da67179e5538 ("drm/nouveau/gsp: Allocate fwsec-sb at boot")
Cc: <stable@vger.kernel.org> # v6.16+
Cc: Lyude Paul <lyude@redhat.com>
Cc: Timur Tabi <ttabi@nvidia.com>
Tested-by: Matthew Schwartz <matthew.schwartz@linux.dev>
Tested-by: Christopher Snowhill <chris@kode54.net>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patch.msgid.link/20260102041829.2748009-1-airlied@gmail.com
2026-01-04 16:55:38 +10:00
Tomi Valkeinen
2fc04340cf drm/tidss: Fix enable/disable order
TI's OLDI and DSI encoders need to be set up before the crtc is enabled,
but the DRM helpers will enable the crtc first. This causes various
issues on TI platforms, like visual artifacts or crtc sync lost
warnings.

Thus drm_atomic_helper_commit_modeset_enables() and
drm_atomic_helper_commit_modeset_disables() cannot be used, as they
enable the crtc before bridges' pre-enable, and disable the crtc after
bridges' post-disable.

Open code the drm_atomic_helper_commit_modeset_enables() and
drm_atomic_helper_commit_modeset_disables(), and first call the bridges'
pre-enables, then crtc enable, then bridges' post-enable (and vice versa
for disable).

Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Cc: stable@vger.kernel.org # v6.17+
Fixes: c9b1150a68d9 ("drm/atomic-helper: Re-order bridge chain pre-enable and post-disable")
Reviewed-by: Aradhya Bhatia <aradhya.bhatia@linux.dev>
Reviewed-by: Maxime Ripard <mripard@kernel.org>
Reviewed-by: Linus Walleij <linusw@kernel.org>
Tested-by: Linus Walleij <linusw@kernel.org>
Signed-off-by: Linus Walleij <linusw@kernel.org>
Link: https://patch.msgid.link/20251205-drm-seq-fix-v1-4-fda68fa1b3de@ideasonboard.com
2026-01-03 20:08:24 +01:00
Linus Walleij
d1c7dc57ff drm/atomic-helper: Export and namespace some functions
Export and namespace the functions not prefixed with drm_* so
that it becomes possible to write custom commit-tail functions
in individual drivers using the helper infrastructure.

Tested-by: Marek Vasut <marek.vasut+renesas@mailbox.org>
Reviewed-by: Maxime Ripard <mripard@kernel.org>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Cc: stable@vger.kernel.org # v6.17+
Fixes: c9b1150a68d9 ("drm/atomic-helper: Re-order bridge chain pre-enable and post-disable")
Reviewed-by: Aradhya Bhatia <aradhya.bhatia@linux.dev>
Reviewed-by: Linus Walleij <linusw@kernel.org>
Tested-by: Linus Walleij <linusw@kernel.org>
Signed-off-by: Linus Walleij <linusw@kernel.org>
Link: https://patch.msgid.link/20251205-drm-seq-fix-v1-3-fda68fa1b3de@ideasonboard.com
2026-01-03 20:08:17 +01:00
Tomi Valkeinen
33e8150bd3 Revert "drm/mediatek: dsi: Fix DSI host and panel bridge pre-enable order"
This reverts commit f5b1819193667bf62c3c99d3921b9429997a14b2.

As the original commit (c9b1150a68d9 ("drm/atomic-helper: Re-order
bridge chain pre-enable and post-disable")) causing the issue has been
reverted, let's revert the fix for mediatek.

Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Cc: stable@vger.kernel.org # v6.17+
Fixes: c9b1150a68d9 ("drm/atomic-helper: Re-order bridge chain pre-enable and post-disable")
Reviewed-by: Maxime Ripard <mripard@kernel.org>
Reviewed-by: Linus Walleij <linusw@kernel.org>
Tested-by: Linus Walleij <linusw@kernel.org>
Signed-off-by: Linus Walleij <linusw@kernel.org>
Link: https://patch.msgid.link/20251205-drm-seq-fix-v1-2-fda68fa1b3de@ideasonboard.com
2026-01-03 20:08:10 +01:00
Tomi Valkeinen
c1ef9a6cab Revert "drm/atomic-helper: Re-order bridge chain pre-enable and post-disable"
This reverts commit c9b1150a68d9362a0827609fc0dc1664c0d8bfe1.

Changing the enable/disable sequence has caused regressions on multiple
platforms: R-Car, MCDE, Rockchip. A series (see link below) was sent to
fix these, but it was decided that it's better to revert the original
patch and change the enable/disable sequence only in the tidss driver.

Reverting this commit breaks tidss's DSI and OLDI outputs, which will be
fixed in the following commits.

Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Link: https://lore.kernel.org/all/20251202-mcde-drm-regression-thirdfix-v6-0-f1bffd4ec0fa%40kernel.org/
Fixes: c9b1150a68d9 ("drm/atomic-helper: Re-order bridge chain pre-enable and post-disable")
Cc: stable@vger.kernel.org # v6.17+
Reviewed-by: Aradhya Bhatia <aradhya.bhatia@linux.dev>
Reviewed-by: Maxime Ripard <mripard@kernel.org>
Reviewed-by: Linus Walleij <linusw@kernel.org>
Tested-by: Linus Walleij <linusw@kernel.org>
Signed-off-by: Linus Walleij <linusw@kernel.org>
Link: https://patch.msgid.link/20251205-drm-seq-fix-v1-1-fda68fa1b3de@ideasonboard.com
2026-01-03 20:06:54 +01:00
Abdun Nihaal
1e876e5a08 gpio: mpsse: fix reference leak in gpio_mpsse_probe() error paths
The reference obtained by calling usb_get_dev() is not released in the
gpio_mpsse_probe() error paths. Fix that by using device-managed helper
functions. Also remove the usb_put_dev() call in the disconnect
function, since the reference will now be released automatically.
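
A sketch of the device-managed approach, with hypothetical names around
the real devm_add_action_or_reset() and usb_put_dev() APIs:

    #include <linux/device.h>
    #include <linux/usb.h>

    static void mpsse_put_usb_dev(void *data)
    {
            usb_put_dev(data);
    }

    static int sketch_probe(struct usb_interface *intf)
    {
            struct usb_device *udev = usb_get_dev(interface_to_usbdev(intf));
            int ret;

            /* releases the reference on probe failure and on detach */
            ret = devm_add_action_or_reset(&intf->dev, mpsse_put_usb_dev, udev);
            if (ret)
                    return ret;
            /* ...rest of probe, no manual usb_put_dev() on error paths... */
            return 0;
    }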

Cc: stable@vger.kernel.org
Fixes: c46a74ff05c0 ("gpio: add support for FTDI's MPSSE as GPIO")
Signed-off-by: Abdun Nihaal <nihaal@cse.iitm.ac.in>
Link: https://lore.kernel.org/r/20251226060414.20785-1-nihaal@cse.iitm.ac.in
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-02 10:45:37 +01:00
Ernest Van Hoecke
014a17deb4 gpio: pca953x: handle short interrupt pulses on PCAL devices
GPIO drivers with latch input support may miss short pulses on input
pins even when input latching is enabled. The generic interrupt logic in
the pca953x driver reports interrupts by comparing the current input
value against the previously sampled one and only signals an event when
a level change is observed between two reads.

For short pulses, the first edge is captured when the input register is
read, but if the signal returns to its previous level before the read,
the second edge is not observed. As a result, successive pulses can
produce identical input values at read time and no level change is
detected, causing interrupts to be missed. The timing diagram below
shows this situation, where the top signal is the input pin level and
the bottom signal indicates the latched value.

─────┐     ┌──*───────────────┐     ┌──*─────────────────┐     ┌──*───
     │     │  .               │     │  .                 │     │  .
     │     │  │               │     │  │                 │     │  │
     └──*──┘  │               └──*──┘  │                 └──*──┘  │
Input   │     │                  │     │                    │     │
        ▼     │                  ▼     │                    ▼     │
       IRQ    │                 IRQ    │                   IRQ    │
              .                        .                          .
─────┐        .┌──────────────┐        .┌────────────────┐        .┌──
     │         │              │         │                │         │
     │         │              │         │                │         │
     └────────*┘              └────────*┘                └────────*┘
Latched       │                        │                          │
              ▼                        ▼                          ▼
            READ 0                   READ 0                     READ 0
                                   NO CHANGE                  NO CHANGE

PCAL variants provide an interrupt status register that records which
pins triggered an interrupt, but the status and input registers cannot
be read atomically. The interrupt status is only cleared when the input
port is read, and the input value must also be read to determine the
triggering edge. If another interrupt occurs on a different line after
the status register has been read but before the input register is
sampled, that event will not be reflected in the earlier status
snapshot, so relying solely on the interrupt status register is also
insufficient.

Support for input latching and interrupt status handling was previously
added by [1], but the interrupt status-based logic was reverted by [2]
due to these issues. This patch addresses the original problem by
combining both sources of information. Events indicated by the interrupt
status register are merged with events detected through the existing
level-change logic. As a result:

* short pulses, whose second edges are invisible, are detected via the
  interrupt status register, and
* interrupts that occur between the status and input reads are still
  caught by the generic level-change logic.

This significantly improves robustness on devices that signal interrupts
as short pulses, while avoiding the issues that led to the earlier
reversion. In practice, even if only the first edge of a pulse is
observable, the interrupt is reliably detected.
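
The combination can be sketched as a plain bitwise merge (illustrative,
not the actual pca953x code):

    static unsigned long pending_irqs(unsigned long irq_status,
                                      unsigned long cur_input,
                                      unsigned long prev_input)
    {
            /* edges visible as a level change between the two reads */
            unsigned long level_change = cur_input ^ prev_input;

            /*
             * irq_status catches short pulses whose second edge made
             * the level change invisible; level_change catches events
             * racing the non-atomic status/input reads.
             */
            return irq_status | level_change;
    }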

This fixes missed interrupts from an Ilitek touch controller with its
interrupt line connected to a PCAL6416A, where active-low pulses are
approximately 200 us long.

[1] commit 44896beae605 ("gpio: pca953x: add PCAL9535 interrupt support for Galileo Gen2")
[2] commit d6179f6c6204 ("gpio: pca953x: Improve interrupt support")

Fixes: d6179f6c6204 ("gpio: pca953x: Improve interrupt support")
Signed-off-by: Ernest Van Hoecke <ernest.vanhoecke@toradex.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Link: https://lore.kernel.org/r/20251217153050.142057-1-ernestvanhoecke@gmail.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-02 10:37:35 +01:00
Paweł Narewski
a7ac22d53d gpiolib: fix race condition for gdev->srcu
If two drivers were calling gpiochip_add_data_with_key(), one may be
traversing the srcu-protected list in gpio_name_to_desc(), meanwhile
other has just added its gdev in gpiodev_add_to_list_unlocked().
This creates a non-mutexed and non-protected timeframe, when one
instance is dereferencing and using &gdev->srcu, before the other
has initialized it, resulting in crash:

[    4.935481] Unable to handle kernel paging request at virtual address ffff800272bcc000
[    4.943396] Mem abort info:
[    4.943400]   ESR = 0x0000000096000005
[    4.943403]   EC = 0x25: DABT (current EL), IL = 32 bits
[    4.943407]   SET = 0, FnV = 0
[    4.943410]   EA = 0, S1PTW = 0
[    4.943413]   FSC = 0x05: level 1 translation fault
[    4.943416] Data abort info:
[    4.943418]   ISV = 0, ISS = 0x00000005, ISS2 = 0x00000000
[    4.946220]   CM = 0, WnR = 0, TnD = 0, TagAccess = 0
[    4.955261]   GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
[    4.955268] swapper pgtable: 4k pages, 48-bit VAs, pgdp=0000000038e6c000
[    4.961449] [ffff800272bcc000] pgd=0000000000000000
[    4.969203] , p4d=1000000039739003
[    4.979730] , pud=0000000000000000
[    4.980210] phandle (CPU): 0x0000005e, phandle (BE): 0x5e000000 for node "reset"
[    4.991736] Internal error: Oops: 0000000096000005 [#1] PREEMPT SMP
...
[    5.121359] pc : __srcu_read_lock+0x44/0x98
[    5.131091] lr : gpio_name_to_desc+0x60/0x1a0
[    5.153671] sp : ffff8000833bb430
[    5.298440]
[    5.298443] Call trace:
[    5.298445]  __srcu_read_lock+0x44/0x98
[    5.309484]  gpio_name_to_desc+0x60/0x1a0
[    5.320692]  gpiochip_add_data_with_key+0x488/0xf00
[    5.946419] ---[ end trace 0000000000000000 ]---

Move the initialization code for the gdev fields so that it runs before
the device is added to gpio_devices, grouped with the adjacent
initialization code. Adjust the goto statements to reflect the modified
order of operations.
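
The ordering rule, sketched with stub types (not the actual gpiolib
code):

    struct srcu_struct { int srcu_idx; };
    struct gpio_device { struct srcu_struct srcu; };

    static void init_srcu_struct(struct srcu_struct *s) { s->srcu_idx = 0; }
    static void gpiodev_add_to_list_unlocked(struct gpio_device *gdev)
    {
            (void)gdev; /* publish on the list that readers traverse */
    }

    static void register_gdev(struct gpio_device *gdev)
    {
            /* initialize everything a lockless reader may touch... */
            init_srcu_struct(&gdev->srcu);
            /* ...and only then make the device visible to readers */
            gpiodev_add_to_list_unlocked(gdev);
    }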

Fixes: 47d8b4c1d868 ("gpio: add SRCU infrastructure to struct gpio_device")
Reviewed-by: Jakub Lewalski <jakub.lewalski@nokia.com>
Signed-off-by: Paweł Narewski <pawel.narewski@nokia.com>
[Bartosz: fixed a build issue, removed stray newline]
Link: https://lore.kernel.org/r/20251224082641.10769-1-bartosz.golaszewski@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-02 10:35:55 +01:00
Bartosz Golaszewski
49416483a9 gpio: shared: allow sharing a reset-gpios pin between reset-gpio and gpiolib
We currently support sharing GPIOs between multiple devices whose drivers
use either the GPIOLIB API *OR* the reset control API but not both at
the same time.

There's an unlikely corner-case where a reset-gpios pin can be shared by
one driver using the GPIOLIB API and a second using the reset API. This
will currently not work as the reset-gpio consumers are not described in
firmware at the time of scanning so the shared GPIO just chooses one of
the proxies created for the consumers when the reset-gpio driver performs
the lookup. This can of course conflict in the case described above.

In order to fix it: if we deal with the "reset-gpios" pin that's shared
acconding to the firmware node setup, create a proxy for each described
consumer as well as another one for the potential reset-gpio device. To
that end: rework the matching to take this into account.

Fixes: 7b78b26757e0 ("gpio: shared: handle the reset-gpios corner case")
Link: https://lore.kernel.org/r/20251222-gpio-shared-reset-gpio-proxy-v1-3-8d4bba7d8c14@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-02 10:34:32 +01:00
Bartosz Golaszewski
cb0451e33b gpio: shared: verify con_id when adding proxy lookup
When matching the firmware node with the potential consumer, we
currently omit the con_id string. This can lead to false positives in
the unlikely case of the consumer having been assigned more than one
shared GPIO. Check the connector ID before proceeding.
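
The stricter match can be sketched as (hypothetical helper, not the
actual gpiolib code):

    #include <stdbool.h>
    #include <string.h>

    static bool con_id_matches(const char *entry_con_id, const char *con_id)
    {
            /* "no connector ID" should only match "no connector ID" */
            if (!entry_con_id || !con_id)
                    return entry_con_id == con_id;
            return strcmp(entry_con_id, con_id) == 0;
    }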

Fixes: a060b8c511ab ("gpiolib: implement low-level, shared GPIO support")
Link: https://lore.kernel.org/r/20251222-gpio-shared-reset-gpio-proxy-v1-2-8d4bba7d8c14@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-02 10:34:31 +01:00
Bartosz Golaszewski
9700b0fccf gpiolib: allow multiple lookup tables per consumer
The GPIO machine lookup mechanism dates back to old ARM board files
where lookup tables would all be defined in a single place. Since then,
lookup tables have also been used to address various DT and ACPI quirks
like missing GPIOs and - as of recently - setting up shared GPIO proxy
devices.

The lookup itself stops when we find the first matching table for a
consumer and - if it doesn't contain the right entry - the lookup fails.

Since the tables can now be defined in multiple places and we can't know
how many there are, effectively limiting a consumer to only a single
lookup table is no longer viable.

Rework the code to always look through all tables matching the consumer.
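
A sketch of the reworked scan (generic types, not the actual gpiolib
code):

    #include <stddef.h>
    #include <string.h>

    struct lookup_table {
            const char *consumer;
            const char *entry; /* stand-in for the real entries */
    };

    static const char *find_entry(const struct lookup_table *tables,
                                  size_t n, const char *consumer)
    {
            for (size_t i = 0; i < n; i++) {
                    if (strcmp(tables[i].consumer, consumer) != 0)
                            continue;
                    if (tables[i].entry)
                            return tables[i].entry;
                    /*
                     * No entry in this table: keep scanning the rest
                     * instead of failing on the first consumer match.
                     */
            }
            return NULL;
    }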

Link: https://lore.kernel.org/r/20251222-gpio-shared-reset-gpio-proxy-v1-1-8d4bba7d8c14@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2026-01-02 10:34:31 +01:00
Miaoqian Lin
0ddd3bb4b1 drm/pl111: Fix error handling in pl111_amba_probe
Jump to the existing dev_put label when devm_request_irq() fails
so drm_dev_put() and of_reserved_mem_device_release() run
instead of returning early and leaking resources.
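
The shape of the fix, condensed (not the actual pl111 code):

    ret = devm_request_irq(dev, irq, handler, 0, "pl111", priv);
    if (ret)
            goto dev_put; /* was: return ret, leaking the refs below */
    ...

    dev_put:
            drm_dev_put(drm);
            of_reserved_mem_device_release(dev);
            return ret;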

Found via static analysis and code review.

Fixes: bed41005e617 ("drm/pl111: Initial drm/kms driver for pl111")
Cc: stable@vger.kernel.org
Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Signed-off-by: Linus Walleij <linusw@kernel.org>
Link: https://patch.msgid.link/20251211123345.2392065-1-linmq006@gmail.com
2025-12-31 22:42:26 +01:00
Manivannan Sadhasivam
0cc13256b6 PCI: qcom: Remove ASPM L0s support for MSM8996 SoC
Though I couldn't confirm ASPM L0s support with the Qcom hardware team, a
bug report from Dmitry suggests that L0s is broken on this legacy SoC.
Hence, remove L0s support from the Root Port Link Capabilities in this SoC.

Since qcom_pcie_clear_aspm_l0s() is now used by more than one SoC config,
call it from qcom_pcie_host_init() instead.

Reported-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
Closes: https://lore.kernel.org/linux-pci/4cp5pzmlkkht2ni7us6p3edidnk25l45xrp6w3fxguqcvhq2id@wjqqrdpkypkf
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
Signed-off-by: Manivannan Sadhasivam <mani@kernel.org>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Tested-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
Link: https://patch.msgid.link/20251126081718.8239-1-mani@kernel.org
2025-12-30 11:12:22 -06:00
Harshita Bhilwaria
961ac9d97b crypto: qat - fix duplicate restarting msg during AER error
The restarting message from PF to VF is sent twice during AER error
handling: once from adf_error_detected() and again from
adf_disable_sriov().
This causes userspace subservices to shut down unexpectedly when they
receive a duplicate restarting message after already being restarted.

Avoid calling adf_pf2vf_notify_restarting() and
adf_pf2vf_wait_for_restarting_complete() from adf_error_detected() so
that the restarting msg is sent only once from PF to VF.

Fixes: 9567d3dc760931 ("crypto: qat - improve aer error reset handling")
Signed-off-by: Harshita Bhilwaria <harshita.bhilwaria@intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Ahsan Atta <ahsan.atta@intel.com>
Reviewed-by: Ravikumar PM <ravikumar.pm@intel.com>
Reviewed-by: Srikanth Thokala <srikanth.thokala@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2025-12-29 08:44:14 +08:00
Danilo Krummrich
97872fa28b MAINTAINERS: exclude the tyr driver from DRM MISC
The ARM MALI TYR DRM DRIVER is already maintained through the drm-rust
tree, hence exclude it from drm-misc.

Fixes: cf4fd52e3236 ("rust: drm: Introduce the Tyr driver for Arm Mali GPUs")
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patch.msgid.link/20251223120436.33233-1-dakr@kernel.org
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
2025-12-26 12:14:40 +00:00
Danilo Krummrich
de0bdcaf36 MAINTAINERS: fix typo in TYR DRM driver entry
Fix a missing ':' in the ARM MALI TYR DRM DRIVER entry, which prevents
scripts/get_maintainer.pl from working properly and picking up the
corresponding maintainers.

Fixes: cf4fd52e3236 ("rust: drm: Introduce the Tyr driver for Arm Mali GPUs")
Reported-by: Tamir Duberstein <tamird@kernel.org>
Closes: https://lore.kernel.org/lkml/CAJ-ks9mrZtnPUjp5tD03hW+TyS0M9i-KRF_ramNY-oh-0X+ayA@mail.gmail.com/
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
Reviewed-by: Tamir Duberstein <tamird@kernel.org>
Link: https://patch.msgid.link/20251223115949.32531-1-dakr@kernel.org
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
2025-12-26 12:13:21 +00:00
Christian Brauner
78c850021d
Merge patch series "Fix two regressions from start_creating()/start_removing() conversion"
Tyler Hicks <code@tyhicks.com> says:

When running the eCryptfs test suite on v6.19-rc2, I noticed BUG splats
from every test and that the umount utility was segfaulting when tearing
down after a test. Bisection led me to commit f046fbb4d81d ("ecryptfs:
use new start_creating/start_removing APIs").

This patch series addresses that regression and also a mknod problem
spotted during code review.

* patches from https://patch.msgid.link/20251223194153.2818445-1-code@tyhicks.com:
  ecryptfs: Release lower parent dentry after creating dir
  ecryptfs: Fix improper mknod pairing of start_creating()/end_removing()

Link: https://patch.msgid.link/20251223194153.2818445-1-code@tyhicks.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:58:10 +01:00
Tyler Hicks
5c56afd204
ecryptfs: Release lower parent dentry after creating dir
Fix a mkdir-induced usage count imbalance that tripped a umount_check()
BUG while unmounting the lower filesystem. Commit f046fbb4d81d
("ecryptfs: use new start_creating/start_removing APIs") added a new
dget() of the lower parent dir, in ecryptfs_mkdir(), but did not dput()
the dentry before returning from that function.

The BUG output as seen while running the eCryptfs test suite:

$ ./run_tests.sh -b 131072 -c safe,destructive -f ext4 -K -t lp-926292.sh
...
Running eCryptfs filesystem tests on ext4
lp-926292
------------[ cut here ]------------
BUG: Dentry ffff8e6692d11988{i=c,n=ECRYPTFS_FNEK_ENCRYPTED.FXZuRGZL7QAFtER.JeA46DtdKqkkQx9H2Vpmv234J5CU8YSsrUwZJK4AbXbrN5WkZ348wnqstovKKxA-}  still in use (1) [unmount of ext4 loop0]
WARNING: CPU: 7 PID: 950 at fs/dcache.c:1590 umount_check+0x5e/0x80
Modules linked in: md5 libmd5 ecryptfs encrypted_keys ext4 crc16 mbcache jbd2
CPU: 7 UID: 0 PID: 950 Comm: umount Not tainted 6.18.0-rc1-00013-gf046fbb4d81d #17 PREEMPT(full)
Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
RIP: 0010:umount_check+0x5e/0x80
Code: 88 38 06 00 00 48 8b 40 28 4c 8b 08 48 8b 46 68 48 85 c0 74 04 48 8b 50 38 51 48 c7 c7 60 32 9c b5 48 89 f1 e8 43 5e ca ff 90 <0f> 0b 90 90 58 31 c0 e9 46 9d 6c 00 41 83 f8 01 75 b8 eb a3 66 66
RSP: 0018:ffffa19940c4bdd0 EFLAGS: 00010282
RAX: 0000000000000000 RBX: ffff8e6692fad4c0 RCX: 0000000000000000
RDX: 0000000000000004 RSI: ffffa19940c4bc70 RDI: 00000000ffffffff
RBP: ffffffffb4eb5930 R08: 00000000ffffdfff R09: 0000000000000001
R10: 00000000ffffdfff R11: ffffffffb5c8a9e0 R12: ffff8e6692fad4c0
R13: ffff8e6692fad4c0 R14: ffff8e6692d11a40 R15: ffff8e6692d11988
FS:  00007f6b4b491800(0000) GS:ffff8e670506e000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f6b4b5f8d40 CR3: 0000000114eb7001 CR4: 0000000000772ef0
PKRU: 55555554
Call Trace:
 <TASK>
 d_walk+0xfd/0x370
 shrink_dcache_for_umount+0x4d/0x140
 generic_shutdown_super+0x20/0x160
 kill_block_super+0x1a/0x40
 ext4_kill_sb+0x22/0x40 [ext4]
 deactivate_locked_super+0x33/0xa0
 cleanup_mnt+0xba/0x150
 task_work_run+0x5c/0xa0
 exit_to_user_mode_loop+0xac/0xb0
 do_syscall_64+0x2ab/0xfa0
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7f6b4b6c2a2b
Code: c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 f3 0f 1e fa 31 f6 e9 05 00 00 00 0f 1f 44 00 00 f3 0f 1e fa b8 a6 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 05 c3 0f 1f 40 00 48 8b 15 b9 83 0d 00 f7 d8
RSP: 002b:00007ffcd5b8b498 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6
RAX: 0000000000000000 RBX: 000055b84af0b9e0 RCX: 00007f6b4b6c2a2b
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 000055b84af0bdf0
RBP: 00007ffcd5b8b570 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000103 R11: 0000000000000246 R12: 000055b84af0bae0
R13: 0000000000000000 R14: 000055b84af0bdf0 R15: 0000000000000000
 </TASK>
---[ end trace 0000000000000000 ]---
EXT4-fs (loop0): unmounting filesystem 00d9ea41-f61e-43d0-a449-6be03e7e8428.
EXT4-fs (loop0): sb orphan head is 12
sb_info orphan list:
  inode loop0:12 at ffff8e66950e1df0: mode 40700, nlink 0, next 0
Assertion failure in ext4_put_super() at fs/ext4/super.c:1345: 'list_empty(&sbi->s_orphan)'

Fixes: f046fbb4d81d ("ecryptfs: use new start_creating/start_removing APIs")
Signed-off-by: Tyler Hicks <code@tyhicks.com>
Link: https://patch.msgid.link/20251223194153.2818445-3-code@tyhicks.com
Reviewed-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:58:04 +01:00
Tyler Hicks
5f9ad16bcc
ecryptfs: Fix improper mknod pairing of start_creating()/end_removing()
The ecryptfs_start_creating_dentry() function must be paired with the
end_creating() function. Fix ecryptfs_mknod() so that end_creating() is
properly called in the return path, instead of end_removing().

Fixes: f046fbb4d81d ("ecryptfs: use new start_creating/start_removing APIs")
Signed-off-by: Tyler Hicks <code@tyhicks.com>
Link: https://patch.msgid.link/20251223194153.2818445-2-code@tyhicks.com
Reviewed-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:58:04 +01:00
Al Viro
3dd57ddec9
get rid of bogus __user in struct xattr_args::value
The first member of struct xattr_args is declared as
	__aligned_u64 __user value;
which makes no sense whatsoever; __user is a qualifier and what that
declaration says is "all struct xattr_args instances have .value
_stored_ in user address space, no matter where the rest of the
structure happens to be".

	Something like "int __user *p" stands for "value of p is a pointer
to an instance of int that happens to live in user address space"; it
says nothing about location of p itself, just as const char *p declares a
pointer to unmodifiable char rather than an unmodifiable pointer to char.

	With xattr_args the intent clearly had been "the 64bit value
represents a _pointer_ to object in user address space", but __user has
nothing to do with that.  All it gets us is a couple of bogus warnings
in fs/xattr.c where (userland) instance of xattr_args is copied to local
variable of that type (in kernel address space), followed by access
to its members.  Since we've told sparse that args.value must somehow be
located in userland memory, we get warned that looking at that 64bit
unsigned integer (in a variable already on kernel stack) is not allowed.

	Note that sparse has no way to express "this integer shall never
be cast into a pointer to be dereferenced directly" and I don't see any
way to assign a sane semantics to that.  In any case, __user is not it.
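
The distinction in declaration form, spelled out (sparse annotations,
for illustration):

    int __user *p;       /* p points to an int living in user space */
    int * __user q;      /* q itself lives in user space - the shape
                            the bogus declaration had */
    __aligned_u64 value; /* plain 64-bit integer; the kernel casts it
                            to a user pointer explicitly */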

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Link: https://patch.msgid.link/20251216081939.GQ1712166@ZenIV
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:52:50 +01:00
Bagas Sanjaya
73a91ef328
VFS: fix __start_dirop() kernel-doc warnings
Sphinx report kernel-doc warnings:

WARNING: ./fs/namei.c:2853 function parameter 'state' not described in '__start_dirop'
WARNING: ./fs/namei.c:2853 expecting prototype for start_dirop(). Prototype was for __start_dirop() instead

Fix them up.

Fixes: ff7c4ea11a05c8 ("VFS: add start_creating_killable() and start_removing_killable()")
Signed-off-by: Bagas Sanjaya <bagasdotme@gmail.com>
Link: https://patch.msgid.link/20251219024620.22880-3-bagasdotme@gmail.com
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:33:24 +01:00
Bagas Sanjaya
fe33729d29
fs: Describe @isnew parameter in ilookup5_nowait()
Sphinx reports kernel-doc warning:

WARNING: ./fs/inode.c:1607 function parameter 'isnew' not described in 'ilookup5_nowait'

Describe the parameter.

Fixes: a27628f4363435 ("fs: rework I_NEW handling to operate without fences")
Signed-off-by: Bagas Sanjaya <bagasdotme@gmail.com>
Link: https://patch.msgid.link/20251219024620.22880-2-bagasdotme@gmail.com
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:33:24 +01:00
Mateusz Guzik
46af9ae130
fs: make sure to fail try_to_unlazy() and try_to_unlazy() for LOOKUP_CACHED
Otherwise the slowpath can be taken by the caller, defeating the flag.

This regressed after calls to legitimize_links() started being
conditionally elided and stems from the routine always failing
after seeing the flag, regardless if there were any links.

In order to address both the bug and the weird semantics make it illegal
to call legitimize_links() with LOOKUP_CACHED and handle the problem at
the two callsites.

Fixes: 7c179096e77eca21 ("fs: add predicts based on nd->depth")
Reported-by: Chris Mason <clm@meta.com>
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://patch.msgid.link/20251220054023.142134-1-mjguzik@gmail.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:31:56 +01:00
David Howells
570ad253a3
netfs: Fix early read unlock of page with EOF in middle
The read result collection for buffered reads seems to run ahead of the
completion of subrequests under some circumstances, as can be seen in the
following log snippet:

    9p_client_res: client 18446612686390831168 response P9_TREAD tag  0 err 0
    ...
    netfs_sreq: R=00001b55[1] DOWN TERM  f=192 s=0 5fb2/5fb2 s=5 e=0
    ...
    netfs_collect_folio: R=00001b55 ix=00004 r=4000-5000 t=4000/5fb2
    netfs_folio: i=157f3 ix=00004-00004 read-done
    netfs_folio: i=157f3 ix=00004-00004 read-unlock
    netfs_collect_folio: R=00001b55 ix=00005 r=5000-5fb2 t=5000/5fb2
    netfs_folio: i=157f3 ix=00005-00005 read-done
    netfs_folio: i=157f3 ix=00005-00005 read-unlock
    ...
    netfs_collect_stream: R=00001b55[0:] cto=5fb2 frn=ffffffff
    netfs_collect_state: R=00001b55 col=5fb2 cln=6000 n=c
    netfs_collect_stream: R=00001b55[0:] cto=5fb2 frn=ffffffff
    netfs_collect_state: R=00001b55 col=5fb2 cln=6000 n=8
    ...
    netfs_sreq: R=00001b55[2] ZERO SUBMT f=000 s=5fb2 0/4e s=0 e=0
    netfs_sreq: R=00001b55[2] ZERO TERM  f=102 s=5fb2 4e/4e s=5 e=0

The 'cto=5fb2' indicates the collected file pos we've collected results to
so far - but we still have 0x4e more bytes to go - so we shouldn't have
collected folio ix=00005 yet.  The 'ZERO' subreq that clears the tail
happens after we unlock the folio, allowing the application to see the
uncleared tail through mmap.

The problem is that netfs_read_unlock_folios() will unlock a folio in which
the amount of read results collected hits EOF position - but the ZERO
subreq lies beyond that and so happens after.

Fix this by changing the end check to always be the end of the folio and
never the end of the file.
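
The check change, sketched (illustrative, not the actual netfs code):

    /* before: a folio counted as done at min(folio end, EOF) */
    static int folio_done_old(long long collected_to, long long fend,
                              long long i_size)
    {
            long long end = fend < i_size ? fend : i_size;

            return collected_to >= end;
    }

    /*
     * after: always wait for the folio's own end, so the ZERO subreq
     * clearing the tail completes before the folio is unlocked
     */
    static int folio_done_new(long long collected_to, long long fend)
    {
            return collected_to >= fend;
    }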

In the future, I should look at clearing to the end of the folio here rather
than adding a ZERO subreq to do this.  On the other hand, the ZERO subreq can
run in parallel with an async READ subreq.  Further, the ZERO subreq may still
be necessary to, say, handle extents in a ceph file that don't have any
backing store and are thus implicitly all zeros.

This can be reproduced by creating a file, the size of which doesn't align
to a page boundary, e.g. 24998 (0x5fb2) bytes and then doing something
like:

    xfs_io -c "mmap -r 0 0x6000" -c "madvise -d 0 0x6000" \
           -c "mread -v 0 0x6000" /xfstest.test/x

The last 0x4e bytes should all be 00, but if the tail hasn't been cleared
yet, you may see rubbish there.  This can be reproduced with kafs by
modifying the kernel to disable the call to netfs_read_subreq_progress()
and to stop afs_issue_read() from doing the async call for NETFS_READAHEAD.
Reproduction can be made easier by inserting an mdelay(100) in
netfs_issue_read() for the ZERO-subreq case.

AFS and CIFS are normally unlikely to show this as they dispatch READ ops
asynchronously, which allows the ZERO-subreq to finish first.  9P's READ op is
completely synchronous, so the ZERO-subreq will always happen after.  It isn't
seen all the time, though, because the collection may be done in a worker
thread.

Reported-by: Christian Schoenebeck <linux_oss@crudebyte.com>
Link: https://lore.kernel.org/r/8622834.T7Z3S40VBb@weasel/
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://patch.msgid.link/938162.1766233900@warthog.procyon.org.uk
Fixes: e2d46f2ec332 ("netfs: Change the read result collector to only use one work item")
Tested-by: Christian Schoenebeck <linux_oss@crudebyte.com>
Acked-by: Dominique Martinet <asmadeus@codewreck.org>
Suggested-by: Dominique Martinet <asmadeus@codewreck.org>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: v9fs@lists.linux.dev
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-24 13:30:24 +01:00
Bartosz Golaszewski
a05543d6b0 gpio: it87: balance superio enter/exit calls in error path
We always call superio_enter() in it87_gpio_direction_out() but only
call superio_exit() if the call to it87_gpio_set() succeeds. Move the
label to balance the calls in error path as well.

Fixes: ef877a159072 ("gpio: it87: use new line value setter callbacks")
Reported-by: Daniel Gibson <daniel@gibson.sh>
Closes: https://lore.kernel.org/all/bd0a00e3-9b8c-43e8-8772-e67b91f4c71e@gibson.sh/
Link: https://lore.kernel.org/r/20251210055026.23146-1-bartosz.golaszewski@oss.qualcomm.com
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
2025-12-18 11:29:42 +01:00
Alexandre Courbot
b58c87b0fd gpu: nova-core: gsp: replace firmware version with "bindings" alias
We have an "bindings" alias to avoid having to mention the firmware
version again and again, and limit the diff when upgrading the firmware.
Use it where we neglected to.

Fixes: eaf0989c77e4 ("gpu: nova-core: Add bindings required by GSP sequencer")
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patch.msgid.link/20251216-nova-fixes-v3-4-c7469a71f7c4@nvidia.com
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
2025-12-16 22:04:25 +09:00
Alexandre Courbot
523317152c gpu: nova-core: bindings: derive MaybeZeroable
Commit 4846300ba8f9 ("rust: derive `Zeroable` for all structs & unions
generated by bindgen where possible") automatically derives
`MaybeZeroable` for all bindings. This is better than selectively
deriving `Zeroable` as it ensures all types that can implement
`Zeroable` do.

Regenerate the nova-core bindings so they benefit from this, and remove
a now unneeded implementation of `Zeroable`.

Fixes: 75f6b1de8133 ("gpu: nova-core: gsp: Add GSP command queue bindings and handling")
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patch.msgid.link/20251216-nova-fixes-v3-3-c7469a71f7c4@nvidia.com
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
2025-12-16 22:04:25 +09:00
Alexandre Courbot
9d250ab0cf gpu: nova-core: gsp: fix length of received messages
The size of messages' payload is miscalculated, leading to extra data
passed to the message handler. While this is not a problem with our
current set of commands, others with a variable-length payload may
misbehave. Fix this by introducing a method returning the payload size
and using it.

Fixes: 75f6b1de8133 ("gpu: nova-core: gsp: Add GSP command queue bindings and handling")
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patch.msgid.link/20251216-nova-fixes-v3-2-c7469a71f7c4@nvidia.com
[acourbot@nvidia.com: update `PANIC:` comments as pointed out by Joel.]
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
2025-12-16 22:03:48 +09:00
Alexandre Courbot
b6c7651823 gpu: nova-core: bindings: add missing explicit padding
Explicit padding is needed in order to avoid uninitialized bytes and
safely implement `AsBytes`. The `--explicit-padding` option of bindgen
was omitted by mistake when these bindings were generated.

Fixes: 13f85988d4fa ("gpu: nova-core: gsp: Retrieve GSP static info to gather GPU information")
Reviewed-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Joel Fernandes <joelagnelf@nvidia.com>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patch.msgid.link/20251216-nova-fixes-v3-1-c7469a71f7c4@nvidia.com
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
2025-12-16 21:59:40 +09:00
Alexandre Courbot
3d3352e73a gpu: nova-core: select RUST_FW_LOADER_ABSTRACTIONS
RUST_FW_LOADER_ABSTRACTIONS was depended on by NOVA_CORE, but NOVA_CORE
is selected by DRM_NOVA. This creates a situation where, if DRM_NOVA is
selected, NOVA_CORE gets enabled but not RUST_FW_LOADER_ABSTRACTIONS,
which results in a build error.

Since the firmware loader is an implementation detail of the driver, it
should be enabled along with it, so change the "depends on" to a
"select".

Fixes: 54e6baf123fd ("gpu: nova-core: add initial driver stub")
Closes: https://lore.kernel.org/oe-kbuild-all/202512061721.rxKGnt5q-lkp@intel.com/
Tested-by: Alyssa Ross <hi@alyssa.is>
Acked-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patch.msgid.link/20251106-b4-select-rust-fw-v3-2-771172257755@nvidia.com
Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
2025-12-16 21:56:44 +09:00
Philipp Stanner
f371d2afd5 MAINTAINERS: Update Nova GPU driver git link
Nova driver development has been moved to a different git repository.
Update the MAINTAINERS entry to reflect that.

Reported-by: Gary Guo <gary@garyguo.net>
Signed-off-by: Philipp Stanner <phasta@kernel.org>
Link: https://patch.msgid.link/20251208140713.41330-3-phasta@kernel.org
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
2025-12-15 22:51:26 +01:00
Christian Brauner
24835a96f2
Merge patch series "filelock: fix conflict detection with userland file delegations"
Jeff Layton <jlayton@kernel.org> says:

This patchset fixes the way that conflicts are detected when userland
requests file delegations. The problem is due to a hack that was added
long ago which worked up until userland could request a file delegation.

This fixes the bug and makes things a bit less hacky. Please consider
for v6.19.

* patches from https://patch.msgid.link/20251204-dir-deleg-ro-v2-0-22d37f92ce2c@kernel.org:
  filelock: allow lease_managers to dictate what qualifies as a conflict
  filelock: add lease_dispose_list() helper

Link: https://patch.msgid.link/20251204-dir-deleg-ro-v2-0-22d37f92ce2c@kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-15 15:20:38 +01:00
Jeff Layton
12965a190e
filelock: allow lease_managers to dictate what qualifies as a conflict
Requesting a delegation on a file from the userland fcntl() interface
currently succeeds when there are conflicting opens present.

This is because the lease handling code ignores conflicting opens for
FL_LAYOUT and FL_DELEG leases. This was a hack put in place long ago,
because nfsd already checks for conflicts in its own way. The kernel
needs to perform this check for userland delegations the same way it is
done for leases, however.

Make this dependent on the lease_manager by adding a new
->lm_open_conflict() lease_manager operation and have
generic_add_lease() call that instead of check_conflicting_open().
Morph check_conflicting_open() into a ->lm_open_conflict() op that is
only called for userland leases/delegations. Set the
->lm_open_conflict() operations for nfsd to trivial functions that
always return 0.

Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Link: https://patch.msgid.link/20251204-dir-deleg-ro-v2-2-22d37f92ce2c@kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-15 15:20:33 +01:00
Jeff Layton
392e317a20
filelock: add lease_dispose_list() helper
The lease-handling code paths always know they're disposing of leases,
yet locks_dispose_list() checks flags at runtime to determine whether
to call locks_free_lease() or locks_free_lock().

Split out a dedicated lease_dispose_list() helper for lease code paths.
This makes the type handling explicit and prepares for the upcoming
lease_manager enhancements where lease-specific operations are being
consolidated.
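
A sketch of what such a helper might look like, assuming leases hang
off a dispose list via their embedded file_lock_core as in the generic
filelock code (names illustrative):

/* Dispose of a list known to hold only leases: no runtime flag check
 * to pick between locks_free_lease() and locks_free_lock(). */
static void lease_dispose_list(struct list_head *dispose)
{
	struct file_lease *fl, *tmp;

	list_for_each_entry_safe(fl, tmp, dispose, c.flc_list) {
		list_del_init(&fl->c.flc_list);
		locks_free_lease(fl);
	}
}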

Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Link: https://patch.msgid.link/20251204-dir-deleg-ro-v2-1-22d37f92ce2c@kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-15 15:20:33 +01:00
Brian Foster
ed61378b4d
iomap: replace folio_batch allocation with stack allocation
Zhang Yi points out that the dynamic folio_batch allocation in
iomap_fill_dirty_folios() is problematic for the ext4 on iomap work
under development, because it doesn't sufficiently handle the
allocation failure case (by allowing a retry, for example). We've
also seen lockdep (via syzbot) complain recently about the scope of
the allocation.

The dynamic allocation was initially added for simplicity and to
help indicate whether the calling fs used the batch. To address
these issues, put the batch on the stack of iomap_zero_range() and
use a flag to control whether the batch should be used in the iomap
folio lookup path. This keeps things simple and eliminates the
allocation problems seen by lockdep and by the ext4 on iomap work.

While here, also clean up the fill helper signature to be more
consistent with the underlying filemap helper. Pass through the
return value of the filemap helper (folio count) and update the
lookup offset via an out param.
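
A rough sketch of the resulting pattern (the function shape is
illustrative, not the final iomap interface):

/* Sketch: the batch lives on the caller's stack, so there is no
 * allocation that can fail and no lockdep-visible allocation scope. */
static int iomap_zero_range_sketch(struct inode *inode, loff_t pos,
				   loff_t len)
{
	struct folio_batch fbatch;

	folio_batch_init(&fbatch);
	/* ... pass &fbatch down with a "fill the batch" flag set so the
	 * folio lookup path collects dirty folios from the page cache,
	 * zero them, then release the batch ... */
	folio_batch_release(&fbatch);
	return 0;
}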

Fixes: 395ed1ef0012 ("iomap: optional zero range dirty folio processing")
Signed-off-by: Brian Foster <bfoster@redhat.com>
Link: https://patch.msgid.link/20251208140548.373411-1-bfoster@redhat.com
Acked-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-15 15:17:44 +01:00
Mathias Krause
a260bd22a3
media: mc: fix potential use-after-free in media_request_alloc()
Commit 6f504cbf108a ("media: convert media_request_alloc() to
FD_PREPARE()") moved the call to fd_install() (now hidden in
fd_publish()) before the snprintf(), making the latter write to
potentially already-freed memory: userland is free to call close()
concurrently right after the call to fd_install(), which may end up
in the request_fops.release() handler freeing 'req'.
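
In miniature, the hazard and the fix look like this (a sketch; the
identifiers stand in for the real media_request_alloc() code):

/* Broken ordering: once the fd is published, a concurrent close() can
 * drop the last reference and free 'req' via request_fops.release(). */
fd_install(fd, filp);
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
	 atomic_read(&mdev->request_id), fd);	/* may write to freed memory */

/* Safe ordering: finish touching 'req' before publishing the fd. */
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
	 atomic_read(&mdev->request_id), fd);
fd_install(fd, filp);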

Fixes: 6f504cbf108a ("media: convert media_request_alloc() to FD_PREPARE()")
Signed-off-by: Mathias Krause <minipli@grsecurity.net>
Link: https://patch.msgid.link/20251209210903.603958-1-minipli@grsecurity.net
Signed-off-by: Christian Brauner <brauner@kernel.org>
2025-12-15 15:12:28 +01:00
77 changed files with 1343 additions and 1136 deletions

View File

@ -416,6 +416,7 @@ lm_change yes no no
lm_breaker_owns_lease:  yes           no                no
lm_lock_expirable       yes           no                no
lm_expire_lock          no            no                yes
lm_open_conflict        yes           no                no
====================== ============= ================= =========
buffer_head

View File

@ -2159,7 +2159,7 @@ M: Alice Ryhl <aliceryhl@google.com>
L: dri-devel@lists.freedesktop.org
S: Supported
W: https://rust-for-linux.com/tyr-gpu-driver
W https://drm.pages.freedesktop.org/maintainer-tools/drm-rust.html
W: https://drm.pages.freedesktop.org/maintainer-tools/drm-rust.html
B: https://gitlab.freedesktop.org/panfrost/linux/-/issues
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git
F: Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml
@ -8068,7 +8068,7 @@ W: https://rust-for-linux.com/nova-gpu-driver
Q: https://patchwork.freedesktop.org/project/nouveau/
B: https://gitlab.freedesktop.org/drm/nova/-/issues
C: irc://irc.oftc.net/nouveau
T: git https://gitlab.freedesktop.org/drm/nova.git nova-next
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git drm-rust-next
F: Documentation/gpu/nova/
F: drivers/gpu/nova-core/
@ -8080,7 +8080,7 @@ W: https://rust-for-linux.com/nova-gpu-driver
Q: https://patchwork.freedesktop.org/project/nouveau/
B: https://gitlab.freedesktop.org/drm/nova/-/issues
C: irc://irc.oftc.net/nouveau
T: git https://gitlab.freedesktop.org/drm/nova.git nova-next
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git drm-rust-next
F: Documentation/gpu/nova/
F: drivers/gpu/drm/nova/
F: include/uapi/drm/nova_drm.h
@ -8358,6 +8358,7 @@ X: drivers/gpu/drm/msm/
X: drivers/gpu/drm/nova/
X: drivers/gpu/drm/radeon/
X: drivers/gpu/drm/tegra/
X: drivers/gpu/drm/tyr/
X: drivers/gpu/drm/xe/
DRM DRIVERS AND COMMON INFRASTRUCTURE [RUST]

View File

@ -181,6 +181,28 @@ static int __init ofpci_debug(char *str)
__setup("ofpci_debug=", ofpci_debug);
static void of_fixup_pci_pref(struct pci_dev *dev, int index,
struct resource *res)
{
struct pci_bus_region region;
if (!(res->flags & IORESOURCE_MEM_64))
return;
if (!resource_size(res))
return;
pcibios_resource_to_bus(dev->bus, &region, res);
if (region.end <= ~((u32)0))
return;
if (!(res->flags & IORESOURCE_PREFETCH)) {
res->flags |= IORESOURCE_PREFETCH;
pci_info(dev, "reg 0x%x: fixup: pref added to 64-bit resource\n",
index);
}
}
static unsigned long pci_parse_of_flags(u32 addr0)
{
unsigned long flags = 0;
@ -244,6 +266,7 @@ static void pci_parse_of_addrs(struct platform_device *op,
res->end = op_res->end;
res->flags = flags;
res->name = pci_name(dev);
of_fixup_pci_pref(dev, i, res);
pci_info(dev, "reg 0x%x: %pR\n", i, res);
}

View File

@ -188,7 +188,7 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
* the IRQ value, which is hardwired to specific interrupt inputs on
* the interrupt controller.
*/
pr_debug("%04x:%02x:%02x[%c] -> %s[%d]\n",
pr_debug("%04x:%02x:%02x[%c] -> %s[%u]\n",
entry->id.segment, entry->id.bus, entry->id.device,
pin_name(entry->pin), prt->source, entry->index);
@ -384,7 +384,7 @@ static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin)
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
int gsi;
u32 gsi;
u8 pin;
int triggering = ACPI_LEVEL_SENSITIVE;
/*
@ -422,18 +422,21 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
rc = -ENODEV;
if (entry) {
if (entry->link)
gsi = acpi_pci_link_allocate_irq(entry->link,
rc = acpi_pci_link_allocate_irq(entry->link,
entry->index,
&triggering, &polarity,
&link);
else
&link, &gsi);
else {
gsi = entry->index;
} else
gsi = -1;
rc = 0;
}
}
if (gsi < 0) {
if (rc < 0) {
/*
* No IRQ known to the ACPI subsystem - maybe the BIOS /
* driver reported one, then use it. Exit in any case.

View File

@ -448,7 +448,7 @@ static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = {
/* >IRQ15 */
};
static int acpi_irq_pci_sharing_penalty(int irq)
static int acpi_irq_pci_sharing_penalty(u32 irq)
{
struct acpi_pci_link *link;
int penalty = 0;
@ -474,7 +474,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
return penalty;
}
static int acpi_irq_get_penalty(int irq)
static int acpi_irq_get_penalty(u32 irq)
{
int penalty = 0;
@ -528,7 +528,7 @@ static int acpi_irq_balance = -1; /* 0: static, 1: balance */
static int acpi_pci_link_allocate(struct acpi_pci_link *link)
{
acpi_handle handle = link->device->handle;
int irq;
u32 irq;
int i;
if (link->irq.initialized) {
@ -598,44 +598,53 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
return 0;
}
/*
* acpi_pci_link_allocate_irq
* success: return IRQ >= 0
* failure: return -1
/**
* acpi_pci_link_allocate_irq(): Retrieve a link device GSI
*
* @handle: Handle for the link device
* @index: GSI index
* @triggering: pointer to store the GSI trigger
* @polarity: pointer to store GSI polarity
* @name: pointer to store link device name
* @gsi: pointer to store GSI number
*
* Returns:
* 0 on success with @triggering, @polarity, @name, @gsi initialized.
* -ENODEV on failure
*/
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name)
int *polarity, char **name, u32 *gsi)
{
struct acpi_device *device = acpi_fetch_acpi_dev(handle);
struct acpi_pci_link *link;
if (!device) {
acpi_handle_err(handle, "Invalid link device\n");
return -1;
return -ENODEV;
}
link = acpi_driver_data(device);
if (!link) {
acpi_handle_err(handle, "Invalid link context\n");
return -1;
return -ENODEV;
}
/* TBD: Support multiple index (IRQ) entries per Link Device */
if (index) {
acpi_handle_err(handle, "Invalid index %d\n", index);
return -1;
return -ENODEV;
}
mutex_lock(&acpi_link_lock);
if (acpi_pci_link_allocate(link)) {
mutex_unlock(&acpi_link_lock);
return -1;
return -ENODEV;
}
if (!link->irq.active) {
mutex_unlock(&acpi_link_lock);
acpi_handle_err(handle, "Link active IRQ is 0!\n");
return -1;
return -ENODEV;
}
link->refcnt++;
mutex_unlock(&acpi_link_lock);
@ -647,7 +656,9 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
if (name)
*name = acpi_device_bid(link->device);
acpi_handle_debug(handle, "Link is referenced\n");
return link->irq.active;
*gsi = link->irq.active;
return 0;
}
/*

View File

@ -41,8 +41,6 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
adf_error_notifier(accel_dev);
adf_pf2vf_notify_fatal_error(accel_dev);
adf_dev_restarting_notify(accel_dev);
adf_pf2vf_notify_restarting(accel_dev);
adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_clear_master(pdev);
adf_dev_down(accel_dev);

View File

@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@ -241,23 +242,17 @@ static int it87_gpio_direction_out(struct gpio_chip *chip,
mask = 1 << (gpio_num % 8);
group = (gpio_num / 8);
spin_lock(&it87_gpio->lock);
guard(spinlock)(&it87_gpio->lock);
rc = superio_enter();
if (rc)
goto exit;
return rc;
/* set the output enable bit */
superio_set_mask(mask, group + it87_gpio->output_base);
rc = it87_gpio_set(chip, gpio_num, val);
if (rc)
goto exit;
superio_exit();
exit:
spin_unlock(&it87_gpio->lock);
return rc;
}

View File

@ -548,6 +548,13 @@ static void gpio_mpsse_ida_remove(void *data)
ida_free(&gpio_mpsse_ida, priv->id);
}
static void gpio_mpsse_usb_put_dev(void *data)
{
struct mpsse_priv *priv = data;
usb_put_dev(priv->udev);
}
static int mpsse_init_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
@ -592,6 +599,10 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&priv->workers);
priv->udev = usb_get_dev(interface_to_usbdev(interface));
err = devm_add_action_or_reset(dev, gpio_mpsse_usb_put_dev, priv);
if (err)
return err;
priv->intf = interface;
priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
@ -713,7 +724,6 @@ static void gpio_mpsse_disconnect(struct usb_interface *intf)
priv->intf = NULL;
usb_set_intfdata(intf, NULL);
usb_put_dev(priv->udev);
}
static struct usb_driver gpio_mpsse_driver = {

View File

@ -943,14 +943,35 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(old_stat, MAX_LINE);
DECLARE_BITMAP(cur_stat, MAX_LINE);
DECLARE_BITMAP(new_stat, MAX_LINE);
DECLARE_BITMAP(int_stat, MAX_LINE);
DECLARE_BITMAP(trigger, MAX_LINE);
DECLARE_BITMAP(edges, MAX_LINE);
int ret;
if (chip->driver_data & PCA_PCAL) {
/* Read INT_STAT before it is cleared by the input-port read. */
ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, int_stat);
if (ret)
return false;
}
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
if (chip->driver_data & PCA_PCAL) {
/* Detect short pulses via INT_STAT. */
bitmap_and(trigger, int_stat, chip->irq_mask, gc->ngpio);
/* Apply filter for rising/falling edge selection. */
bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise,
cur_stat, gc->ngpio);
bitmap_and(int_stat, new_stat, trigger, gc->ngpio);
} else {
bitmap_zero(int_stat, gc->ngpio);
}
/* Remove output pins from the equation */
pca953x_read_regs(chip, chip->regs->direction, reg_direction);
@ -964,7 +985,8 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
if (bitmap_empty(chip->irq_trig_level_high, gc->ngpio) &&
bitmap_empty(chip->irq_trig_level_low, gc->ngpio)) {
if (bitmap_empty(trigger, gc->ngpio))
if (bitmap_empty(trigger, gc->ngpio) &&
bitmap_empty(int_stat, gc->ngpio))
return false;
}
@ -972,6 +994,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(edges, old_stat, cur_stat, gc->ngpio);
bitmap_and(pending, edges, trigger, gc->ngpio);
bitmap_or(pending, pending, int_stat, gc->ngpio);
bitmap_and(cur_stat, new_stat, chip->irq_trig_level_high, gc->ngpio);
bitmap_and(cur_stat, cur_stat, chip->irq_mask, gc->ngpio);

View File

@ -593,6 +593,7 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank)
gc->ngpio = bank->nr_pins;
gc->label = bank->name;
gc->parent = bank->dev;
gc->can_sleep = true;
ret = gpiochip_add_data(gc, bank);
if (ret) {

View File

@ -38,8 +38,10 @@ struct gpio_shared_ref {
int dev_id;
/* Protects the auxiliary device struct and the lookup table. */
struct mutex lock;
struct lock_class_key lock_key;
struct auxiliary_device adev;
struct gpiod_lookup_table *lookup;
bool is_reset_gpio;
};
/* Represents a single GPIO pin. */
@ -76,6 +78,60 @@ gpio_shared_find_entry(struct fwnode_handle *controller_node,
return NULL;
}
static struct gpio_shared_ref *gpio_shared_make_ref(struct fwnode_handle *fwnode,
const char *con_id,
enum gpiod_flags flags)
{
char *con_id_cpy __free(kfree) = NULL;
struct gpio_shared_ref *ref __free(kfree) = kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
return NULL;
if (con_id) {
con_id_cpy = kstrdup(con_id, GFP_KERNEL);
if (!con_id_cpy)
return NULL;
}
ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
if (ref->dev_id < 0)
return NULL;
ref->flags = flags;
ref->con_id = no_free_ptr(con_id_cpy);
ref->fwnode = fwnode;
lockdep_register_key(&ref->lock_key);
mutex_init_with_key(&ref->lock, &ref->lock_key);
return no_free_ptr(ref);
}
static int gpio_shared_setup_reset_proxy(struct gpio_shared_entry *entry,
enum gpiod_flags flags)
{
struct gpio_shared_ref *ref;
list_for_each_entry(ref, &entry->refs, list) {
if (ref->is_reset_gpio)
/* Already set-up. */
return 0;
}
ref = gpio_shared_make_ref(NULL, "reset", flags);
if (!ref)
return -ENOMEM;
ref->is_reset_gpio = true;
list_add_tail(&ref->list, &entry->refs);
pr_debug("Created a secondary shared GPIO reference for potential reset-gpio device for GPIO %u at %s\n",
entry->offset, fwnode_get_name(entry->fwnode));
return 0;
}
/* Handle all special nodes that we should ignore. */
static bool gpio_shared_of_node_ignore(struct device_node *node)
{
@ -106,6 +162,7 @@ static int gpio_shared_of_traverse(struct device_node *curr)
size_t con_id_len, suffix_len;
struct fwnode_handle *fwnode;
struct of_phandle_args args;
struct gpio_shared_ref *ref;
struct property *prop;
unsigned int offset;
const char *suffix;
@ -138,6 +195,7 @@ static int gpio_shared_of_traverse(struct device_node *curr)
for (i = 0; i < count; i++) {
struct device_node *np __free(device_node) = NULL;
char *con_id __free(kfree) = NULL;
ret = of_parse_phandle_with_args(curr, prop->name,
"#gpio-cells", i,
@ -182,15 +240,6 @@ static int gpio_shared_of_traverse(struct device_node *curr)
list_add_tail(&entry->list, &gpio_shared_list);
}
struct gpio_shared_ref *ref __free(kfree) =
kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
return -ENOMEM;
ref->fwnode = fwnode_handle_get(of_fwnode_handle(curr));
ref->flags = args.args[1];
mutex_init(&ref->lock);
if (strends(prop->name, "gpios"))
suffix = "-gpios";
else if (strends(prop->name, "gpio"))
@ -202,27 +251,32 @@ static int gpio_shared_of_traverse(struct device_node *curr)
/* We only set con_id if there's actually one. */
if (strcmp(prop->name, "gpios") && strcmp(prop->name, "gpio")) {
ref->con_id = kstrdup(prop->name, GFP_KERNEL);
if (!ref->con_id)
con_id = kstrdup(prop->name, GFP_KERNEL);
if (!con_id)
return -ENOMEM;
con_id_len = strlen(ref->con_id);
con_id_len = strlen(con_id);
suffix_len = strlen(suffix);
ref->con_id[con_id_len - suffix_len] = '\0';
con_id[con_id_len - suffix_len] = '\0';
}
ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
if (ref->dev_id < 0) {
kfree(ref->con_id);
ref = gpio_shared_make_ref(fwnode_handle_get(of_fwnode_handle(curr)),
con_id, args.args[1]);
if (!ref)
return -ENOMEM;
}
if (!list_empty(&entry->refs))
pr_debug("GPIO %u at %s is shared by multiple firmware nodes\n",
entry->offset, fwnode_get_name(entry->fwnode));
list_add_tail(&no_free_ptr(ref)->list, &entry->refs);
list_add_tail(&ref->list, &entry->refs);
if (strcmp(prop->name, "reset-gpios") == 0) {
ret = gpio_shared_setup_reset_proxy(entry, args.args[1]);
if (ret)
return ret;
}
}
}
@ -306,20 +360,16 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
struct fwnode_handle *reset_fwnode = dev_fwnode(consumer);
struct fwnode_reference_args ref_args, aux_args;
struct device *parent = consumer->parent;
struct gpio_shared_ref *real_ref;
bool match;
int ret;
lockdep_assert_held(&ref->lock);
/* The reset-gpio device must have a parent AND a firmware node. */
if (!parent || !reset_fwnode)
return false;
/*
* FIXME: use device_is_compatible() once the reset-gpio drivers gains
* a compatible string which it currently does not have.
*/
if (!strstarts(dev_name(consumer), "reset.gpio."))
return false;
/*
* Parent of the reset-gpio auxiliary device is the GPIO chip whose
* fwnode we stored in the entry structure.
@ -328,33 +378,61 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
return false;
/*
* The device associated with the shared reference's firmware node is
* the consumer of the reset control exposed by the reset-gpio device.
* It must have a "reset-gpios" property that's referencing the entry's
* firmware node.
*
* The reference args must agree between the real consumer and the
* auxiliary reset-gpio device.
* Now we need to find the actual pin we want to assign to this
* reset-gpio device. To that end: iterate over the list of references
* of this entry and see if there's one, whose reset-gpios property's
* arguments match the ones from this consumer's node.
*/
ret = fwnode_property_get_reference_args(ref->fwnode, "reset-gpios",
NULL, 2, 0, &ref_args);
if (ret)
return false;
list_for_each_entry(real_ref, &entry->refs, list) {
if (real_ref == ref)
continue;
guard(mutex)(&real_ref->lock);
if (!real_ref->fwnode)
continue;
/*
* The device associated with the shared reference's firmware
* node is the consumer of the reset control exposed by the
* reset-gpio device. It must have a "reset-gpios" property
* that's referencing the entry's firmware node.
*
* The reference args must agree between the real consumer and
* the auxiliary reset-gpio device.
*/
ret = fwnode_property_get_reference_args(real_ref->fwnode,
"reset-gpios",
NULL, 2, 0, &ref_args);
if (ret)
continue;
ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
NULL, 2, 0, &aux_args);
if (ret) {
fwnode_handle_put(ref_args.fwnode);
continue;
}
match = ((ref_args.fwnode == entry->fwnode) &&
(aux_args.fwnode == entry->fwnode) &&
(ref_args.args[0] == aux_args.args[0]));
ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
NULL, 2, 0, &aux_args);
if (ret) {
fwnode_handle_put(ref_args.fwnode);
return false;
fwnode_handle_put(aux_args.fwnode);
if (!match)
continue;
/*
* Reuse the fwnode of the real device, next time we'll use it
* in the normal path.
*/
ref->fwnode = fwnode_handle_get(reset_fwnode);
return true;
}
match = ((ref_args.fwnode == entry->fwnode) &&
(aux_args.fwnode == entry->fwnode) &&
(ref_args.args[0] == aux_args.args[0]));
fwnode_handle_put(ref_args.fwnode);
fwnode_handle_put(aux_args.fwnode);
return match;
return false;
}
#else
static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
@ -365,25 +443,34 @@ static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
}
#endif /* CONFIG_RESET_GPIO */
int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
unsigned long lflags)
{
const char *dev_id = dev_name(consumer);
struct gpiod_lookup_table *lookup;
struct gpio_shared_entry *entry;
struct gpio_shared_ref *ref;
struct gpiod_lookup_table *lookup __free(kfree) =
kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
if (!lookup)
return -ENOMEM;
list_for_each_entry(entry, &gpio_shared_list, list) {
list_for_each_entry(ref, &entry->refs, list) {
if (!device_match_fwnode(consumer, ref->fwnode) &&
!gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
continue;
guard(mutex)(&ref->lock);
/*
* FIXME: use device_is_compatible() once the reset-gpio
* drivers gains a compatible string which it currently
* does not have.
*/
if (!ref->fwnode && strstarts(dev_name(consumer), "reset.gpio.")) {
if (!gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
continue;
} else if (!device_match_fwnode(consumer, ref->fwnode)) {
continue;
}
if ((!con_id && ref->con_id) || (con_id && !ref->con_id) ||
(con_id && ref->con_id && strcmp(con_id, ref->con_id) != 0))
continue;
/* We've already done that on a previous request. */
if (ref->lookup)
return 0;
@ -395,6 +482,10 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
if (!key)
return -ENOMEM;
lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
if (!lookup)
return -ENOMEM;
pr_debug("Adding machine lookup entry for a shared GPIO for consumer %s, with key '%s' and con_id '%s'\n",
dev_id, key, ref->con_id ?: "none");
@ -402,7 +493,7 @@ int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
lookup->table[0] = GPIO_LOOKUP(no_free_ptr(key), 0,
ref->con_id, lflags);
ref->lookup = no_free_ptr(lookup);
ref->lookup = lookup;
gpiod_add_lookup_table(ref->lookup);
return 0;
@ -466,8 +557,9 @@ int gpio_device_setup_shared(struct gpio_device *gdev)
entry->offset, gpio_device_get_label(gdev));
list_for_each_entry(ref, &entry->refs, list) {
pr_debug("Setting up a shared GPIO entry for %s\n",
fwnode_get_name(ref->fwnode));
pr_debug("Setting up a shared GPIO entry for %s (con_id: '%s')\n",
fwnode_get_name(ref->fwnode) ?: "(no fwnode)",
ref->con_id ?: "(none)");
ret = gpio_shared_make_adev(gdev, entry, ref);
if (ret)
@ -487,15 +579,6 @@ void gpio_device_teardown_shared(struct gpio_device *gdev)
if (!device_match_fwnode(&gdev->dev, entry->fwnode))
continue;
/*
* For some reason if we call synchronize_srcu() in GPIO core,
* descent here and take this mutex and then recursively call
* synchronize_srcu() again from gpiochip_remove() (which is
* totally fine) called after gpio_shared_remove_adev(),
* lockdep prints a false positive deadlock splat. Disable
* lockdep here.
*/
lockdep_off();
list_for_each_entry(ref, &entry->refs, list) {
guard(mutex)(&ref->lock);
@ -508,7 +591,6 @@ void gpio_device_teardown_shared(struct gpio_device *gdev)
gpio_shared_remove_adev(&ref->adev);
}
lockdep_on();
}
}
@ -604,6 +686,7 @@ static void gpio_shared_drop_ref(struct gpio_shared_ref *ref)
{
list_del(&ref->list);
mutex_destroy(&ref->lock);
lockdep_unregister_key(&ref->lock_key);
kfree(ref->con_id);
ida_free(&gpio_shared_ida, ref->dev_id);
fwnode_handle_put(ref->fwnode);
@ -635,12 +718,38 @@ static void __init gpio_shared_teardown(void)
}
}
static bool gpio_shared_entry_is_really_shared(struct gpio_shared_entry *entry)
{
size_t num_nodes = list_count_nodes(&entry->refs);
struct gpio_shared_ref *ref;
if (num_nodes <= 1)
return false;
if (num_nodes > 2)
return true;
/* Exactly two references: */
list_for_each_entry(ref, &entry->refs, list) {
/*
* Corner-case: the second reference comes from the potential
* reset-gpio instance. However, this pin is not really shared
* as it would have three references in this case. Avoid
* creating unnecessary proxies.
*/
if (ref->is_reset_gpio)
return false;
}
return true;
}
static void gpio_shared_free_exclusive(void)
{
struct gpio_shared_entry *entry, *epos;
list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
if (list_count_nodes(&entry->refs) > 1)
if (gpio_shared_entry_is_really_shared(entry))
continue;
gpio_shared_drop_ref(list_first_entry(&entry->refs,

View File

@ -16,7 +16,8 @@ struct device;
int gpio_device_setup_shared(struct gpio_device *gdev);
void gpio_device_teardown_shared(struct gpio_device *gdev);
int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags);
int gpio_shared_add_proxy_lookup(struct device *consumer, const char *con_id,
unsigned long lflags);
#else
@ -28,6 +29,7 @@ static inline int gpio_device_setup_shared(struct gpio_device *gdev)
static inline void gpio_device_teardown_shared(struct gpio_device *gdev) { }
static inline int gpio_shared_add_proxy_lookup(struct device *consumer,
const char *con_id,
unsigned long lflags)
{
return 0;

View File

@ -1105,6 +1105,18 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->ngpio = gc->ngpio;
gdev->can_sleep = gc->can_sleep;
rwlock_init(&gdev->line_state_lock);
RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
if (ret)
goto err_free_label;
ret = init_srcu_struct(&gdev->desc_srcu);
if (ret)
goto err_cleanup_gdev_srcu;
scoped_guard(mutex, &gpio_devices_lock) {
/*
* TODO: this allocates a Linux GPIO number base in the global
@ -1119,7 +1131,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (base < 0) {
ret = base;
base = 0;
goto err_free_label;
goto err_cleanup_desc_srcu;
}
/*
@ -1139,22 +1151,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiodev_add_to_list_unlocked(gdev);
if (ret) {
gpiochip_err(gc, "GPIO integer space overlap, cannot add chip\n");
goto err_free_label;
goto err_cleanup_desc_srcu;
}
}
rwlock_init(&gdev->line_state_lock);
RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
if (ret)
goto err_remove_from_list;
ret = init_srcu_struct(&gdev->desc_srcu);
if (ret)
goto err_cleanup_gdev_srcu;
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@ -1164,11 +1164,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_set_names(gc);
if (ret)
goto err_cleanup_desc_srcu;
goto err_remove_from_list;
ret = gpiochip_init_valid_mask(gc);
if (ret)
goto err_cleanup_desc_srcu;
goto err_remove_from_list;
for (desc_index = 0; desc_index < gc->ngpio; desc_index++) {
struct gpio_desc *desc = &gdev->descs[desc_index];
@ -1248,10 +1248,6 @@ err_remove_of_chip:
of_gpiochip_remove(gc);
err_free_valid_mask:
gpiochip_free_valid_mask(gc);
err_cleanup_desc_srcu:
cleanup_srcu_struct(&gdev->desc_srcu);
err_cleanup_gdev_srcu:
cleanup_srcu_struct(&gdev->srcu);
err_remove_from_list:
scoped_guard(mutex, &gpio_devices_lock)
list_del_rcu(&gdev->list);
@ -1261,6 +1257,10 @@ err_remove_from_list:
gpio_device_put(gdev);
goto err_print_message;
}
err_cleanup_desc_srcu:
cleanup_srcu_struct(&gdev->desc_srcu);
err_cleanup_gdev_srcu:
cleanup_srcu_struct(&gdev->srcu);
err_free_label:
kfree_const(gdev->label);
err_free_descs:
@ -4508,45 +4508,41 @@ void gpiod_remove_hogs(struct gpiod_hog *hogs)
}
EXPORT_SYMBOL_GPL(gpiod_remove_hogs);
static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
static bool gpiod_match_lookup_table(struct device *dev,
const struct gpiod_lookup_table *table)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
struct gpiod_lookup_table *table;
list_for_each_entry(table, &gpio_lookup_list, list) {
if (table->dev_id && dev_id) {
/*
* Valid strings on both ends, must be identical to have
* a match
*/
if (!strcmp(table->dev_id, dev_id))
return table;
} else {
/*
* One of the pointers is NULL, so both must be to have
* a match
*/
if (dev_id == table->dev_id)
return table;
}
lockdep_assert_held(&gpio_lookup_lock);
if (table->dev_id && dev_id) {
/*
* Valid strings on both ends, must be identical to have
* a match
*/
if (!strcmp(table->dev_id, dev_id))
return true;
} else {
/*
* One of the pointers is NULL, so both must be to have
* a match
*/
if (dev_id == table->dev_id)
return true;
}
return NULL;
return false;
}
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
static struct gpio_desc *gpio_desc_table_match(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags,
struct gpiod_lookup_table *table)
{
struct gpio_desc *desc = ERR_PTR(-ENOENT);
struct gpiod_lookup_table *table;
struct gpio_desc *desc;
struct gpiod_lookup *p;
struct gpio_chip *gc;
guard(mutex)(&gpio_lookup_lock);
table = gpiod_find_lookup_table(dev);
if (!table)
return desc;
lockdep_assert_held(&gpio_lookup_lock);
for (p = &table->table[0]; p->key; p++) {
/* idx must always match exactly */
@ -4600,7 +4596,30 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
return desc;
}
return desc;
return NULL;
}
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
unsigned int idx, unsigned long *flags)
{
struct gpiod_lookup_table *table;
struct gpio_desc *desc;
guard(mutex)(&gpio_lookup_lock);
list_for_each_entry(table, &gpio_lookup_list, list) {
if (!gpiod_match_lookup_table(dev, table))
continue;
desc = gpio_desc_table_match(dev, con_id, idx, flags, table);
if (!desc)
continue;
/* On IS_ERR() or match. */
return desc;
}
return ERR_PTR(-ENOENT);
}
static int platform_gpio_count(struct device *dev, const char *con_id)
@ -4610,14 +4629,16 @@ static int platform_gpio_count(struct device *dev, const char *con_id)
unsigned int count = 0;
scoped_guard(mutex, &gpio_lookup_lock) {
table = gpiod_find_lookup_table(dev);
if (!table)
return -ENOENT;
list_for_each_entry(table, &gpio_lookup_list, list) {
if (!gpiod_match_lookup_table(dev, table))
continue;
for (p = &table->table[0]; p->key; p++) {
if ((con_id && p->con_id && !strcmp(con_id, p->con_id)) ||
(!con_id && !p->con_id))
count++;
for (p = &table->table[0]; p->key; p++) {
if ((con_id && p->con_id &&
!strcmp(con_id, p->con_id)) ||
(!con_id && !p->con_id))
count++;
}
}
}
@ -4696,7 +4717,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
* lookup table for the proxy device as previously
* we only knew the consumer's fwnode.
*/
ret = gpio_shared_add_proxy_lookup(consumer, lookupflags);
ret = gpio_shared_add_proxy_lookup(consumer, con_id,
lookupflags);
if (ret)
return ERR_PTR(ret);

View File

@ -3445,11 +3445,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD/VPE, it's handled specially */
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
@ -5867,6 +5866,9 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
if (ret)
goto mode1_reset_failed;
/* enable mmio access after mode 1 reset completed */
adev->no_hw_access = false;
amdgpu_device_load_pci_state(adev->pdev);
ret = amdgpu_psp_wait_for_bootloader(adev);
if (ret)

View File

@ -89,6 +89,16 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
return seq;
}
static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
{
af->fence_wptr_start = af->ring->wptr;
}
static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
{
af->fence_wptr_end = af->ring->wptr;
}
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
@ -116,8 +126,10 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
amdgpu_fence_save_fence_wptr_start(af);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
amdgpu_fence_save_fence_wptr_end(af);
amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
@ -709,6 +721,7 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
struct amdgpu_ring *ring = af->ring;
unsigned long flags;
u32 seq, last_seq;
bool reemitted = false;
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
@ -726,7 +739,9 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);
if (fence == af)
if (fence->reemitted > 1)
reemitted = true;
else if (fence == af)
dma_fence_set_error(&fence->base, -ETIME);
else if (fence->context == af->context)
dma_fence_set_error(&fence->base, -ECANCELED);
@ -734,9 +749,12 @@ void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
rcu_read_unlock();
} while (last_seq != seq);
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
/* signal the guilty fence */
amdgpu_fence_write(ring, (u32)af->base.seqno);
amdgpu_fence_process(ring);
if (reemitted) {
/* if we've already reemitted once then just cancel everything */
amdgpu_fence_driver_force_completion(af->ring);
af->ring->ring_backup_entries_to_copy = 0;
}
}
void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
@ -784,10 +802,18 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
/* save everything if the ring is not guilty, otherwise
* just save the content from other contexts.
*/
if (!guilty_fence || (fence->context != guilty_fence->context))
if (!fence->reemitted &&
(!guilty_fence || (fence->context != guilty_fence->context))) {
amdgpu_ring_backup_unprocessed_command(ring, wptr,
fence->wptr);
} else if (!fence->reemitted) {
/* always save the fence */
amdgpu_ring_backup_unprocessed_command(ring,
fence->fence_wptr_start,
fence->fence_wptr_end);
}
wptr = fence->wptr;
fence->reemitted++;
}
rcu_read_unlock();
} while (last_seq != seq);

View File

@ -318,12 +318,36 @@ void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
}
EXPORT_SYMBOL(isp_kernel_buffer_free);
static int isp_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
if (isp->funcs->hw_resume)
return isp->funcs->hw_resume(isp);
return -ENODEV;
}
static int isp_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_isp *isp = &adev->isp;
if (isp->funcs->hw_suspend)
return isp->funcs->hw_suspend(isp);
return -ENODEV;
}
static const struct amd_ip_funcs isp_ip_funcs = {
.name = "isp_ip",
.early_init = isp_early_init,
.hw_init = isp_hw_init,
.hw_fini = isp_hw_fini,
.is_idle = isp_is_idle,
.suspend = isp_suspend,
.resume = isp_resume,
.set_clockgating_state = isp_set_clockgating_state,
.set_powergating_state = isp_set_powergating_state,
};

View File

@ -38,6 +38,8 @@ struct amdgpu_isp;
struct isp_funcs {
int (*hw_init)(struct amdgpu_isp *isp);
int (*hw_fini)(struct amdgpu_isp *isp);
int (*hw_suspend)(struct amdgpu_isp *isp);
int (*hw_resume)(struct amdgpu_isp *isp);
};
struct amdgpu_isp {

View File

@ -201,6 +201,9 @@ static enum amd_ip_block_type
type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
break;
case AMDGPU_HW_IP_VPE:
type = AMD_IP_BLOCK_TYPE_VPE;
break;
default:
type = AMD_IP_BLOCK_TYPE_NUM;
break;
@ -721,6 +724,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMD_IP_BLOCK_TYPE_UVD:
count = adev->uvd.num_uvd_inst;
break;
case AMD_IP_BLOCK_TYPE_VPE:
count = adev->vpe.num_instances;
break;
/* For all other IP block types not listed in the switch statement
* the ip status is valid here and the instance count is one.
*/

View File

@ -144,10 +144,15 @@ struct amdgpu_fence {
struct amdgpu_ring *ring;
ktime_t start_timestamp;
/* wptr for the fence for resets */
/* wptr for the total submission for resets */
u64 wptr;
/* fence context for resets */
u64 context;
/* has this fence been reemitted */
unsigned int reemitted;
/* wptr for the fence for the submission */
u64 fence_wptr_start;
u64 fence_wptr_end;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;

View File

@ -26,6 +26,7 @@
*/
#include <linux/gpio/machine.h>
#include <linux/pm_runtime.h>
#include "amdgpu.h"
#include "isp_v4_1_1.h"
@ -145,6 +146,9 @@ static int isp_genpd_add_device(struct device *dev, void *data)
return -ENODEV;
}
/* The devices will be managed by the pm ops from the parent */
dev_pm_syscore_device(dev, true);
exit:
/* Continue to add */
return 0;
@ -177,12 +181,47 @@ static int isp_genpd_remove_device(struct device *dev, void *data)
drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret);
return -ENODEV;
}
dev_pm_syscore_device(dev, false);
exit:
/* Continue to remove */
return 0;
}
static int isp_suspend_device(struct device *dev, void *data)
{
return pm_runtime_force_suspend(dev);
}
static int isp_resume_device(struct device *dev, void *data)
{
return pm_runtime_force_resume(dev);
}
static int isp_v4_1_1_hw_suspend(struct amdgpu_isp *isp)
{
int r;
r = device_for_each_child(isp->parent, NULL,
isp_suspend_device);
if (r)
dev_err(isp->parent, "failed to suspend hw devices (%d)\n", r);
return r;
}
static int isp_v4_1_1_hw_resume(struct amdgpu_isp *isp)
{
int r;
r = device_for_each_child(isp->parent, NULL,
isp_resume_device);
if (r)
dev_err(isp->parent, "failed to resume hw device (%d)\n", r);
return r;
}
static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp)
{
const struct software_node *amd_camera_node, *isp4_node;
@ -369,6 +408,8 @@ static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp)
static const struct isp_funcs isp_v4_1_1_funcs = {
.hw_init = isp_v4_1_1_hw_init,
.hw_fini = isp_v4_1_1_hw_fini,
.hw_suspend = isp_v4_1_1_hw_suspend,
.hw_resume = isp_v4_1_1_hw_resume,
};
void isp_v4_1_1_set_isp_funcs(struct amdgpu_isp *isp)

View File

@ -763,14 +763,14 @@ static enum bp_result bios_parser_encoder_control(
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac1_encoder_control(
bp, cntl->action == ENCODER_CONTROL_ENABLE,
bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
} else if (cntl->engine_id == ENGINE_ID_DACB) {
if (!bp->cmd_tbl.dac2_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dac2_encoder_control(
bp, cntl->action == ENCODER_CONTROL_ENABLE,
bp, cntl->action,
cntl->pixel_clock, ATOM_DAC1_PS2);
}

View File

@ -1797,7 +1797,30 @@ static enum bp_result select_crtc_source_v3(
&params.ucEncodeMode))
return BP_RESULT_BADINPUT;
params.ucDstBpc = bp_params->bit_depth;
switch (bp_params->color_depth) {
case COLOR_DEPTH_UNDEFINED:
params.ucDstBpc = PANEL_BPC_UNDEFINE;
break;
case COLOR_DEPTH_666:
params.ucDstBpc = PANEL_6BIT_PER_COLOR;
break;
default:
case COLOR_DEPTH_888:
params.ucDstBpc = PANEL_8BIT_PER_COLOR;
break;
case COLOR_DEPTH_101010:
params.ucDstBpc = PANEL_10BIT_PER_COLOR;
break;
case COLOR_DEPTH_121212:
params.ucDstBpc = PANEL_12BIT_PER_COLOR;
break;
case COLOR_DEPTH_141414:
dm_error("14-bit color not supported by SelectCRTC_Source v3\n");
break;
case COLOR_DEPTH_161616:
params.ucDstBpc = PANEL_16BIT_PER_COLOR;
break;
}
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
result = BP_RESULT_OK;
@ -1815,12 +1838,12 @@ static enum bp_result select_crtc_source_v3(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
bool enable,
enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
bool enable,
enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
@ -1846,12 +1869,15 @@ static void init_dac_encoder_control(struct bios_parser *bp)
static void dac_encoder_control_prepare_params(
DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
bool enable,
enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
params->ucDacStandard = dac_standard;
if (enable)
if (action == ENCODER_CONTROL_SETUP ||
action == ENCODER_CONTROL_INIT)
params->ucAction = ATOM_ENCODER_INIT;
else if (action == ENCODER_CONTROL_ENABLE)
params->ucAction = ATOM_ENABLE;
else
params->ucAction = ATOM_DISABLE;
@ -1864,7 +1890,7 @@ static void dac_encoder_control_prepare_params(
static enum bp_result dac1_encoder_control_v1(
struct bios_parser *bp,
bool enable,
enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@ -1873,7 +1899,7 @@ static enum bp_result dac1_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
enable,
action,
pixel_clock,
dac_standard);
@ -1885,7 +1911,7 @@ static enum bp_result dac1_encoder_control_v1(
static enum bp_result dac2_encoder_control_v1(
struct bios_parser *bp,
bool enable,
enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard)
{
@ -1894,7 +1920,7 @@ static enum bp_result dac2_encoder_control_v1(
dac_encoder_control_prepare_params(
&params,
enable,
action,
pixel_clock,
dac_standard);

View File

@ -57,12 +57,12 @@ struct cmd_tbl {
struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
bool enable,
enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac2_encoder_control)(
struct bios_parser *bp,
bool enable,
enum bp_encoder_control_action action,
uint32_t pixel_clock,
uint8_t dac_standard);
enum bp_result (*dac1_output_control)(

View File

@ -30,7 +30,11 @@ dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
frame_warn_limit := 3072
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
frame_warn_limit := 4096
else
frame_warn_limit := 3072
endif
else
frame_warn_limit := 2048
endif

View File

@ -77,32 +77,14 @@ static unsigned int dscceComputeDelay(
static unsigned int dscComputeDelay(
enum output_format_class pixelFormat,
enum output_encoder_class Output);
// Super monster function with some 45 argument
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@ -116,7 +98,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@ -124,9 +105,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@ -135,14 +113,7 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix);
bool *NotEnoughTimeForDynamicMetadata);
static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
static void CalculateDCCConfiguration(
@ -294,62 +265,23 @@ static void CalculateDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool GPUVMEnable,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported);
enum clock_change_support *DRAMClockChangeSupport);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
unsigned int NumberOfActivePlanes,
@ -810,29 +742,12 @@ static unsigned int dscComputeDelay(enum output_format_class pixelFormat, enum o
static bool CalculatePrefetchSchedule(
struct display_mode_lib *mode_lib,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
double PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
unsigned int k,
Pipe *myPipe,
unsigned int DSCDelay,
double DPPCLKDelaySubtotalPlusCNVCFormater,
double DPPCLKDelaySCL,
double DPPCLKDelaySCLLBOnly,
double DPPCLKDelayCNVCCursor,
double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
enum output_format_class OutputFormat,
unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
unsigned int GPUVMPageTableLevels,
bool GPUVMEnable,
bool HostVMEnable,
unsigned int HostVMMaxNonCachedPageTableLevels,
double HostVMMinPageSize,
bool DynamicMetadataEnable,
bool DynamicMetadataVMEnabled,
int DynamicMetadataLinesBeforeActiveRequired,
unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@ -846,7 +761,6 @@ static bool CalculatePrefetchSchedule(
unsigned int MaxNumSwathY,
double PrefetchSourceLinesC,
unsigned int SwathWidthC,
int BytePerPixelC,
double VInitPreFillC,
unsigned int MaxNumSwathC,
long swath_width_luma_ub,
@ -854,9 +768,6 @@ static bool CalculatePrefetchSchedule(
unsigned int SwathHeightY,
unsigned int SwathHeightC,
double TWait,
bool ProgressiveToInterlaceUnitInOPP,
double *DSTXAfterScaler,
double *DSTYAfterScaler,
double *DestinationLinesForPrefetch,
double *PrefetchBandwidth,
double *DestinationLinesToRequestVMInVBlank,
@ -865,15 +776,10 @@ static bool CalculatePrefetchSchedule(
double *VRatioPrefetchC,
double *RequiredPrefetchPixDataBWLuma,
double *RequiredPrefetchPixDataBWChroma,
bool *NotEnoughTimeForDynamicMetadata,
double *Tno_bw,
double *prefetch_vmrow_bw,
double *Tdmdl_vm,
double *Tdmdl,
unsigned int *VUpdateOffsetPix,
double *VUpdateWidthPix,
double *VReadyOffsetPix)
bool *NotEnoughTimeForDynamicMetadata)
{
struct vba_vars_st *v = &mode_lib->vba;
double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles = 0, DISPCLKCycles = 0;
double DSTTotalPixelsAfterScaler = 0;
@ -905,26 +811,26 @@ static bool CalculatePrefetchSchedule(
double Tdmec = 0;
double Tdmsks = 0;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor = PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
if (v->GPUVMEnable == true && v->HostVMEnable == true) {
HostVMInefficiencyFactor = v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData / v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly;
HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMInefficiencyFactor = 1;
HostVMDynamicLevelsTrips = 0;
}
CalculateDynamicMetadataParameters(
MaxInterDCNTileRepeaters,
v->MaxInterDCNTileRepeaters,
myPipe->DPPCLK,
myPipe->DISPCLK,
myPipe->DCFCLKDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
DynamicMetadataTransmittedBytes,
DynamicMetadataLinesBeforeActiveRequired,
v->DynamicMetadataTransmittedBytes[k],
v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
ProgressiveToInterlaceUnitInOPP,
v->ProgressiveToInterlaceUnitInOPP,
&Tsetup,
&Tdmbf,
&Tdmec,
@ -932,16 +838,16 @@ static bool CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
if (DynamicMetadataVMEnabled == true && GPUVMEnable == true) {
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
if (v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true) {
v->Tdmdl[k] = TWait + Tvm_trips + trip_to_mem;
} else {
*Tdmdl = TWait + UrgentExtraLatency;
v->Tdmdl[k] = TWait + UrgentExtraLatency;
}
if (DynamicMetadataEnable == true) {
if (VStartup * LineTime < Tsetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
if (v->DynamicMetadataEnable[k] == true) {
if (VStartup * LineTime < Tsetup + v->Tdmdl[k] + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
} else {
*NotEnoughTimeForDynamicMetadata = false;
@ -949,39 +855,39 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
}
} else {
*NotEnoughTimeForDynamicMetadata = false;
}
*Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true && GPUVMEnable == true ? TWait + Tvm_trips : 0);
v->Tdmdl_vm[k] = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true && v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
DISPCLKCycles = DISPCLKDelaySubtotal;
DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->DPPCLK == 0.0 || myPipe->DISPCLK == 0.0)
return true;
*DSTXAfterScaler = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
v->DSTXAfterScaler[k] = DPPCycles * myPipe->PixelClock / myPipe->DPPCLK + DISPCLKCycles * myPipe->PixelClock / myPipe->DISPCLK
+ DSCDelay;
*DSTXAfterScaler = *DSTXAfterScaler + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
v->DSTXAfterScaler[k] = v->DSTXAfterScaler[k] + ((myPipe->ODMCombineEnabled)?18:0) + (myPipe->DPPPerPlane - 1) * DPP_RECOUT_WIDTH;
if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && v->ProgressiveToInterlaceUnitInOPP))
v->DSTYAfterScaler[k] = 1;
else
*DSTYAfterScaler = 0;
v->DSTYAfterScaler[k] = 0;
DSTTotalPixelsAfterScaler = *DSTYAfterScaler * myPipe->HTotal + *DSTXAfterScaler;
*DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
*DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * myPipe->HTotal));
DSTTotalPixelsAfterScaler = v->DSTYAfterScaler[k] * myPipe->HTotal + v->DSTXAfterScaler[k];
v->DSTYAfterScaler[k] = dml_floor(DSTTotalPixelsAfterScaler / myPipe->HTotal, 1);
v->DSTXAfterScaler[k] = DSTTotalPixelsAfterScaler - ((double) (v->DSTYAfterScaler[k] * myPipe->HTotal));
MyError = false;
@ -990,33 +896,33 @@ static bool CalculatePrefetchSchedule(
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1) / 4 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1) / 4 * LineTime;
if (GPUVMEnable) {
if (GPUVMPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem * ((GPUVMPageTableLevels - 2) - 1);
if (v->GPUVMEnable) {
if (v->GPUVMMaxPageTableLevels >= 3) {
v->Tno_bw[k] = UrgentExtraLatency + trip_to_mem * ((v->GPUVMMaxPageTableLevels - 2) - 1);
} else
*Tno_bw = 0;
v->Tno_bw[k] = 0;
} else if (!myPipe->DCCEnable)
*Tno_bw = LineTime;
v->Tno_bw[k] = LineTime;
else
*Tno_bw = LineTime / 4;
v->Tno_bw[k] = LineTime / 4;
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime
- (*DSTYAfterScaler + *DSTXAfterScaler / myPipe->HTotal);
dst_y_prefetch_equ = VStartup - (Tsetup + dml_max(TWait + TCalc, v->Tdmdl[k])) / LineTime
- (v->DSTYAfterScaler[k] + v->DSTXAfterScaler[k] / myPipe->HTotal);
dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
Lsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC);
Tsw_oto = Lsw_oto * LineTime;
prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC) / Tsw_oto;
prefetch_bw_oto = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k]) / Tsw_oto;
if (GPUVMEnable == true) {
Tvm_oto = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
if (v->GPUVMEnable == true) {
Tvm_oto = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
Tvm_trips,
LineTime / 4.0);
} else
Tvm_oto = LineTime / 4.0;
if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max3(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
LineTime - Tvm_oto, LineTime / 4);
@ -1042,10 +948,10 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tdmbf: %fus - time for dmd transfer from dchub to dio output buffer\n", Tdmbf);
dml_print("DML: Tdmec: %fus - time dio takes to transfer dmd\n", Tdmec);
dml_print("DML: Tdmsks: %fus - time before active dmd must complete transmission at dio\n", Tdmsks);
dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", *Tdmdl_vm);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", *Tdmdl);
dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", *DSTXAfterScaler);
dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)*DSTYAfterScaler);
dml_print("DML: Tdmdl_vm: %fus - time for vm stages of dmd \n", v->Tdmdl_vm[k]);
dml_print("DML: Tdmdl: %fus - time for fabric to become ready and fetch dmd \n", v->Tdmdl[k]);
dml_print("DML: dst_x_after_scl: %f pixels - number of pixel clocks pipeline and buffer delay after scaler \n", v->DSTXAfterScaler[k]);
dml_print("DML: dst_y_after_scl: %d lines - number of lines of pipeline and buffer delay after scaler \n", (int)v->DSTYAfterScaler[k]);
*PrefetchBandwidth = 0;
*DestinationLinesToRequestVMInVBlank = 0;
@ -1059,26 +965,26 @@ static bool CalculatePrefetchSchedule(
double PrefetchBandwidth3 = 0;
double PrefetchBandwidth4 = 0;
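// Four candidate prefetch bandwidths are evaluated below; the Case1/2/3
// checks further down select one according to whether the implied VM and
// row fetch times meet the Tvm_trips_rounded/Tr0_trips_rounded deadlines.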
if (Tpre_rounded - *Tno_bw > 0)
if (Tpre_rounded - v->Tno_bw[k] > 0)
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte
+ 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor
+ PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY
+ PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
/ (Tpre_rounded - *Tno_bw);
+ PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
/ (Tpre_rounded - v->Tno_bw[k]);
else
PrefetchBandwidth1 = 0;
if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw) > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - *Tno_bw);
if (VStartup == MaxVStartup && (PrefetchBandwidth1 > 4 * prefetch_bw_oto) && (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]) > 0) {
PrefetchBandwidth1 = (PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor + 2 * MetaRowByte + 2 * PixelPTEBytesPerRow * HostVMInefficiencyFactor) / (Tpre_rounded - Tsw_oto / 4 - 0.75 * LineTime - v->Tno_bw[k]);
}
if (Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded > 0)
if (Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth2 = (PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY +
PrefetchSourceLinesC * swath_width_chroma_ub *
BytePerPixelC) /
(Tpre_rounded - *Tno_bw - 2 * Tr0_trips_rounded);
v->BytePerPixelC[k]) /
(Tpre_rounded - v->Tno_bw[k] - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth2 = 0;
@ -1086,7 +992,7 @@ static bool CalculatePrefetchSchedule(
PrefetchBandwidth3 = (2 * MetaRowByte + 2 * PixelPTEBytesPerRow *
HostVMInefficiencyFactor + PrefetchSourceLinesY *
swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC *
swath_width_chroma_ub * BytePerPixelC) / (Tpre_rounded -
swath_width_chroma_ub * v->BytePerPixelC[k]) / (Tpre_rounded -
Tvm_trips_rounded);
else
PrefetchBandwidth3 = 0;
@ -1096,7 +1002,7 @@ static bool CalculatePrefetchSchedule(
}
if (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded > 0)
PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * BytePerPixelC)
PrefetchBandwidth4 = (PrefetchSourceLinesY * swath_width_luma_ub * BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * v->BytePerPixelC[k])
/ (Tpre_rounded - Tvm_trips_rounded - 2 * Tr0_trips_rounded);
else
PrefetchBandwidth4 = 0;
@ -1107,7 +1013,7 @@ static bool CalculatePrefetchSchedule(
bool Case3OK;
if (PrefetchBandwidth1 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth1
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth1 >= Tr0_trips_rounded) {
Case1OK = true;
} else {
@ -1118,7 +1024,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth2 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth2
>= Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth2 < Tr0_trips_rounded) {
Case2OK = true;
} else {
@ -1129,7 +1035,7 @@ static bool CalculatePrefetchSchedule(
}
if (PrefetchBandwidth3 > 0) {
if (*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
if (v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / PrefetchBandwidth3
< Tvm_trips_rounded && (MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / PrefetchBandwidth3 >= Tr0_trips_rounded) {
Case3OK = true;
} else {
@ -1152,13 +1058,13 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: prefetch_bw_equ: %f\n", prefetch_bw_equ);
if (prefetch_bw_equ > 0) {
if (GPUVMEnable) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
if (v->GPUVMEnable) {
Tvm_equ = dml_max3(v->Tno_bw[k] + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_equ, Tvm_trips, LineTime / 4);
} else {
Tvm_equ = LineTime / 4;
}
if ((GPUVMEnable || myPipe->DCCEnable)) {
if ((v->GPUVMEnable || myPipe->DCCEnable)) {
Tr0_equ = dml_max4(
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_equ,
Tr0_trips,
@ -1227,7 +1133,7 @@ static bool CalculatePrefetchSchedule(
}
*RequiredPrefetchPixDataBWLuma = (double) PrefetchSourceLinesY / LinesToRequestPrefetchPixelData * BytePerPixelY * swath_width_luma_ub / LineTime;
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * BytePerPixelC * swath_width_chroma_ub / LineTime;
*RequiredPrefetchPixDataBWChroma = (double) PrefetchSourceLinesC / LinesToRequestPrefetchPixelData * v->BytePerPixelC[k] * swath_width_chroma_ub / LineTime;
} else {
MyError = true;
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
@ -1243,9 +1149,9 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: Tr0: %fus - time to fetch first row of data pagetables and first row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tr1: %fus - time to fetch second row of data pagetables and second row of meta data (done in parallel)\n", TimeForFetchingRowInVBlank);
dml_print("DML: Tsw: %fus = time to fetch enough pixel data and cursor data to feed the scalers init position and detile\n", (double)LinesToRequestPrefetchPixelData * LineTime);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: To: %fus - time for propagation from scaler to optc\n", (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime);
dml_print("DML: Tvstartup - Tsetup - Tcalc - Twait - Tpre - To > 0\n");
dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (*DSTYAfterScaler + ((*DSTXAfterScaler) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: Tslack(pre): %fus - time left over in schedule\n", VStartup * LineTime - TimeForFetchingMetaPTE - 2 * TimeForFetchingRowInVBlank - (v->DSTYAfterScaler[k] + ((v->DSTXAfterScaler[k]) / (double) myPipe->HTotal)) * LineTime - TWait - TCalc - Tsetup);
dml_print("DML: row_bytes = dpte_row_bytes (per_pipe) = PixelPTEBytesPerRow = : %d\n", PixelPTEBytesPerRow);
} else {
@ -1276,7 +1182,7 @@ static bool CalculatePrefetchSchedule(
dml_print("DML: MyErr set %s:%d\n", __FILE__, __LINE__);
}
*prefetch_vmrow_bw = dml_max(prefetch_vm_bw, prefetch_row_bw);
v->prefetch_vmrow_bw[k] = dml_max(prefetch_vm_bw, prefetch_row_bw);
}
if (MyError) {
@ -2437,30 +2343,12 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->ErrorResult[k] = CalculatePrefetchSchedule(
mode_lib,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
k,
&myPipe,
v->DSCDelay[k],
v->DPPCLKDelaySubtotal
+ v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
(unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
v->TCalc,
@ -2474,7 +2362,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathY[k],
v->PrefetchSourceLinesC[k],
v->SwathWidthC[k],
v->BytePerPixelC[k],
v->VInitPreFillC[k],
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
@ -2482,9 +2369,6 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SwathHeightY[k],
v->SwathHeightC[k],
TWait,
v->ProgressiveToInterlaceUnitInOPP,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->DestinationLinesForPrefetch[k],
&v->PrefetchBandwidth[k],
&v->DestinationLinesToRequestVMInVBlank[k],
@ -2493,14 +2377,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
&v->VRatioPrefetchC[k],
&v->RequiredPrefetchPixDataBWLuma[k],
&v->RequiredPrefetchPixDataBWChroma[k],
&v->NotEnoughTimeForDynamicMetadata[k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
&v->NotEnoughTimeForDynamicMetadata[k]);
if (v->BlendingAndTiming[k] == k) {
double TotalRepeaterDelayTime = v->MaxInterDCNTileRepeaters * (2 / v->DPPCLK[k] + 3 / v->DISPCLK);
v->VUpdateWidthPix[k] = (14 / v->DCFCLKDeepSleep + 12 / v->DPPCLK[k] + TotalRepeaterDelayTime) * v->PixelClock[k];
@ -2730,62 +2607,23 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->DPPOutputBufferPixels,
v->DETBufferSizeInKByte[0],
v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
v->GPUVMEnable,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLK,
v->FinalDRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->DCFCLKDeepSleep,
v->DPPPerPlane,
v->DCCEnable,
v->DPPCLK,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
&DRAMClockChangeSupport,
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
&DRAMClockChangeSupport);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@ -4770,29 +4608,12 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule(
mode_lib,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
v->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
k,
&myPipe,
v->DSCDelayPerState[i][k],
v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater,
v->DPPCLKDelaySCL,
v->DPPCLKDelaySCLLBOnly,
v->DPPCLKDelayCNVCCursor,
v->DISPCLKDelaySubtotal,
v->SwathWidthYThisState[k] / v->HRatio[k],
v->OutputFormat[k],
v->MaxInterDCNTileRepeaters,
dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
v->MaximumVStartup[i][j][k],
v->GPUVMMaxPageTableLevels,
v->GPUVMEnable,
v->HostVMEnable,
v->HostVMMaxNonCachedPageTableLevels,
v->HostVMMinPageSize,
v->DynamicMetadataEnable[k],
v->DynamicMetadataVMEnabled,
v->DynamicMetadataLinesBeforeActiveRequired[k],
v->DynamicMetadataTransmittedBytes[k],
v->UrgLatency[i],
v->ExtraLatency,
v->TimeCalc,
@ -4806,7 +4627,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaxNumSwY[k],
v->PrefetchLinesC[i][j][k],
v->SwathWidthCThisState[k],
v->BytePerPixelC[k],
v->PrefillC[k],
v->MaxNumSwC[k],
v->swath_width_luma_ub_this_state[k],
@ -4814,9 +4634,6 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightYThisState[k],
v->SwathHeightCThisState[k],
v->TWait,
v->ProgressiveToInterlaceUnitInOPP,
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
&v->LineTimesForPrefetch[k],
&v->PrefetchBW[k],
&v->LinesForMetaPTE[k],
@ -4825,14 +4642,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&v->VRatioPreC[i][j][k],
&v->RequiredPrefetchPixelDataBWLuma[i][j][k],
&v->RequiredPrefetchPixelDataBWChroma[i][j][k],
&v->NoTimeForDynamicMetadata[i][j][k],
&v->Tno_bw[k],
&v->prefetch_vmrow_bw[k],
&v->Tdmdl_vm[k],
&v->Tdmdl[k],
&v->VUpdateOffsetPix[k],
&v->VUpdateWidthPix[k],
&v->VReadyOffsetPix[k]);
&v->NoTimeForDynamicMetadata[i][j][k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@ -5007,62 +4817,23 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
v->NumberOfActivePlanes,
v->MaxLineBufferLines,
v->LineBufferSize,
v->DPPOutputBufferPixels,
v->DETBufferSizeInKByte[0],
v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
v->GPUVMEnable,
v->dpte_group_bytes,
v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
v->WritebackLatency,
v->WritebackChunkSize,
v->SOCCLKPerState[i],
v->FinalDRAMClockChangeLatency,
v->SRExitTime,
v->SREnterPlusExitTime,
v->ProjectedDCFCLKDeepSleep[i][j],
v->NoOfDPPThisState,
v->DCCEnable,
v->RequiredDPPCLKThisState,
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
v->HRatio,
v->HRatioChroma,
v->vtaps,
v->VTAPsChroma,
v->VRatio,
v->VRatioChroma,
v->HTotal,
v->PixelClock,
v->BlendingAndTiming,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
v->WritebackEnable,
v->WritebackPixelFormat,
v->WritebackDestinationWidth,
v->WritebackDestinationHeight,
v->WritebackSourceHeight,
&v->DRAMClockChangeSupport[i][j],
&v->UrgentWatermark,
&v->WritebackUrgentWatermark,
&v->DRAMClockChangeWatermark,
&v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->MinActiveDRAMClockChangeLatencySupported);
&v->DRAMClockChangeSupport[i][j]);
}
}
@ -5179,63 +4950,25 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
unsigned int NumberOfActivePlanes,
unsigned int MaxLineBufferLines,
unsigned int LineBufferSize,
unsigned int DPPOutputBufferPixels,
unsigned int DETBufferSizeInKByte,
unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
bool GPUVMEnable,
unsigned int dpte_group_bytes[],
unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
double WritebackLatency,
double WritebackChunkSize,
double SOCCLK,
double DRAMClockChangeLatency,
double SRExitTime,
double SREnterPlusExitTime,
double DCFCLKDeepSleep,
unsigned int DPPPerPlane[],
bool DCCEnable[],
double DPPCLK[],
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
double HRatio[],
double HRatioChroma[],
unsigned int vtaps[],
unsigned int VTAPsChroma[],
double VRatio[],
double VRatioChroma[],
unsigned int HTotal[],
double PixelClock[],
unsigned int BlendingAndTiming[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
bool WritebackEnable[],
enum source_format_class WritebackPixelFormat[],
double WritebackDestinationWidth[],
double WritebackDestinationHeight[],
double WritebackSourceHeight[],
enum clock_change_support *DRAMClockChangeSupport,
double *UrgentWatermark,
double *WritebackUrgentWatermark,
double *DRAMClockChangeWatermark,
double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *MinActiveDRAMClockChangeLatencySupported)
enum clock_change_support *DRAMClockChangeSupport)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY = 0;
double EffectiveLBLatencyHidingC = 0;
double LinesInDETY[DC__NUM_DPP__MAX] = { 0 };
@ -5254,101 +4987,101 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double WritebackDRAMClockChangeLatencyHiding = 0;
unsigned int k, j;
mode_lib->vba.TotalActiveDPP = 0;
mode_lib->vba.TotalDCCActiveDPP = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + DPPPerPlane[k];
if (DCCEnable[k] == true) {
mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + DPPPerPlane[k];
v->TotalActiveDPP = 0;
v->TotalDCCActiveDPP = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
v->TotalActiveDPP = v->TotalActiveDPP + DPPPerPlane[k];
if (v->DCCEnable[k] == true) {
v->TotalDCCActiveDPP = v->TotalDCCActiveDPP + DPPPerPlane[k];
}
}
*UrgentWatermark = UrgentLatency + ExtraLatency;
v->UrgentWatermark = UrgentLatency + ExtraLatency;
*DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
v->DRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->UrgentWatermark;
mode_lib->vba.TotalActiveWriteback = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (WritebackEnable[k] == true) {
mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
v->TotalActiveWriteback = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackUrgentWatermark = WritebackLatency;
if (v->TotalActiveWriteback <= 1) {
v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
*WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (mode_lib->vba.TotalActiveWriteback <= 1) {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
if (v->TotalActiveWriteback <= 1) {
v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency;
} else {
*WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
v->WritebackDRAMClockChangeWatermark = v->FinalDRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
mode_lib->vba.LBLatencyHidingSourceLinesY = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
v->LBLatencyHidingSourceLinesY = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
mode_lib->vba.LBLatencyHidingSourceLinesC = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
LinesInDETY[k] = (double) DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = mode_lib->vba.DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY[k] - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - *UrgentWatermark - (HTotal[k] / PixelClock[k]) * (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) - *DRAMClockChangeWatermark;
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC - v->UrgentWatermark - (v->HTotal[k] / v->PixelClock[k]) * (v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) - v->DRAMClockChangeWatermark;
if (NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
if (WritebackEnable[k] == true) {
if (v->WritebackEnable[k] == true) {
WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024 / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
if (WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024 / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
if (mode_lib->vba.WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
if (v->WritebackConfiguration == dm_whole_buffer_for_single_stream_interleave) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding * 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - mode_lib->vba.WritebackDRAMClockChangeWatermark;
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(v->ActiveDRAMClockChangeLatencyMargin[k], WritebackDRAMClockChangeLatencyMargin);
}
}
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
if (BlendingAndTiming[k] == k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
for (j = 0; j < NumberOfActivePlanes; ++j) {
if (BlendingAndTiming[k] == j) {
for (j = 0; j < v->NumberOfActivePlanes; ++j) {
if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@ -5356,40 +5089,40 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
*MinActiveDRAMClockChangeLatencySupported = mode_lib->vba.MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->FinalDRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin) && v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
}
mode_lib->vba.TotalNumberOfActiveOTG = 0;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (BlendingAndTiming[k] == k) {
mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + 1;
v->TotalNumberOfActiveOTG = 0;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
if (v->MinActiveDRAMClockChangeMargin > 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
} else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
} else if (((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[0];
for (k = 0; k < NumberOfActivePlanes; ++k) {
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (FullDETBufferingTimeY[k] <= FullDETBufferingTimeYStutterCriticalPlane) {
FullDETBufferingTimeYStutterCriticalPlane = FullDETBufferingTimeY[k];
TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (HTotal[k] / PixelClock[k]) / VRatio[k];
TimeToFinishSwathTransferStutterCriticalPlane = (SwathHeightY[k] - (LinesInDETY[k] - LinesInDETYRoundedDownToSwath[k])) * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
}
}
*StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
*StutterEnterPlusExitWatermark = dml_max(SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
v->StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
v->StutterEnterPlusExitWatermark = dml_max(v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep, TimeToFinishSwathTransferStutterCriticalPlane);
}
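The thread running through all of these dml30 hunks: CalculatePrefetchSchedule() and CalculateWatermarksAndDRAMSpeedChangeSupport() stop taking long lists of scalar and pointer parameters and instead read and write per-plane state through the shared struct vba_vars_st, indexed by the plane counter k, which is why the call sites above shrink so dramatically. A minimal compilable sketch of the pattern, using a hypothetical, heavily trimmed stand-in for the real state struct:

struct vba_vars_st_sketch {		/* hypothetical, trimmed stand-in */
	unsigned int NumberOfActivePlanes;
	double Tno_bw[8];		/* 8 stands in for DC__NUM_DPP__MAX */
	double DSTXAfterScaler[8];
	double DSTYAfterScaler[8];
};

/* Before: each result came back through its own pointer out-parameter.
 * After: the plane index k selects a slot in the shared state, so a call
 * site needs only (v, k). */
static void calculate_for_plane(struct vba_vars_st_sketch *v, unsigned int k)
{
	v->Tno_bw[k] = 0.0;
	v->DSTXAfterScaler[k] = 0.0;
	v->DSTYAfterScaler[k] = 0.0;
}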


@ -1610,38 +1610,12 @@ dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
struct dc_bios *bios = link->ctx->dc_bios;
struct bp_crtc_source_select crtc_source_select = {0};
enum engine_id engine_id = link->link_enc->preferred_engine;
uint8_t bit_depth;
if (dc_is_rgb_signal(pipe_ctx->stream->signal))
engine_id = link->link_enc->analog_engine;
switch (pipe_ctx->stream->timing.display_color_depth) {
case COLOR_DEPTH_UNDEFINED:
bit_depth = 0;
break;
case COLOR_DEPTH_666:
bit_depth = 6;
break;
default:
case COLOR_DEPTH_888:
bit_depth = 8;
break;
case COLOR_DEPTH_101010:
bit_depth = 10;
break;
case COLOR_DEPTH_121212:
bit_depth = 12;
break;
case COLOR_DEPTH_141414:
bit_depth = 14;
break;
case COLOR_DEPTH_161616:
bit_depth = 16;
break;
}
crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
crtc_source_select.bit_depth = bit_depth;
crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth;
crtc_source_select.engine_id = engine_id;
crtc_source_select.sink_signal = pipe_ctx->stream->signal;


@ -932,7 +932,7 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
struct link_encoder *link_enc = link->link_enc;
enum engine_id engine_id = link_enc->preferred_engine;
enum dal_device_type device_type = DEVICE_TYPE_CRT;
enum bp_result bp_result;
enum bp_result bp_result = BP_RESULT_UNSUPPORTED;
uint32_t enum_id;
switch (engine_id) {
@ -946,7 +946,9 @@ static bool link_detect_dac_load_detect(struct dc_link *link)
break;
}
bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
if (bios->funcs->dac_load_detection)
bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
return bp_result == BP_RESULT_OK;
}


@ -136,7 +136,7 @@ struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
enum signal_type sink_signal;
uint8_t bit_depth;
enum dc_color_depth color_depth;
};
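Together with the dce110_select_crtc_source() hunk above, this means the BIOS-parser interface now receives the raw dc_color_depth enum, so the enum-to-bit-count translation the removed switch used to perform can live in one place next to the BIOS code. A hedged sketch of that mapping (the helper name is invented for illustration; the values mirror the removed switch):

#include <stdint.h>

enum dc_color_depth {			/* trimmed to the depths seen above */
	COLOR_DEPTH_UNDEFINED,
	COLOR_DEPTH_666,
	COLOR_DEPTH_888,
	COLOR_DEPTH_101010,
	COLOR_DEPTH_121212,
	COLOR_DEPTH_141414,
	COLOR_DEPTH_161616,
};

/* Hypothetical helper: the same mapping dce110_select_crtc_source() dropped. */
static uint8_t color_depth_to_bits(enum dc_color_depth depth)
{
	switch (depth) {
	case COLOR_DEPTH_UNDEFINED:	return 0;
	case COLOR_DEPTH_666:		return 6;
	case COLOR_DEPTH_101010:	return 10;
	case COLOR_DEPTH_121212:	return 12;
	case COLOR_DEPTH_141414:	return 14;
	case COLOR_DEPTH_161616:	return 16;
	case COLOR_DEPTH_888:
	default:			return 8;
	}
}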
struct bp_transmitter_control {


@ -2455,24 +2455,21 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
}
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (pptable->PcieGenSpeed[i] > pcie_gen_cap ||
pptable->PcieLaneCount[i] > pcie_width_cap) {
dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
pptable->PcieGenSpeed[i] > pcie_gen_cap ?
pcie_gen_cap : pptable->PcieGenSpeed[i];
dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
pptable->PcieLaneCount[i] > pcie_width_cap ?
pcie_width_cap : pptable->PcieLaneCount[i];
smu_pcie_arg = i << 16;
smu_pcie_arg |= pcie_gen_cap << 8;
smu_pcie_arg |= pcie_width_cap;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
if (ret)
break;
}
dpm_context->dpm_tables.pcie_table.pcie_gen[i] =
pptable->PcieGenSpeed[i] > pcie_gen_cap ?
pcie_gen_cap : pptable->PcieGenSpeed[i];
dpm_context->dpm_tables.pcie_table.pcie_lane[i] =
pptable->PcieLaneCount[i] > pcie_width_cap ?
pcie_width_cap : pptable->PcieLaneCount[i];
smu_pcie_arg = i << 16;
smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_gen[i] << 8;
smu_pcie_arg |= dpm_context->dpm_tables.pcie_table.pcie_lane[i];
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg,
NULL);
if (ret)
return ret;
}
return ret;
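Net effect of this hunk: the old loop only messaged the SMU for table entries that exceeded the platform caps, and it sent the raw caps rather than the clamped table values; the new loop clamps every link level, records the clamped gen/width in the dpm table, and sends exactly those values, returning on the first failure. A compilable sketch of the clamp-and-pack step behind SMU_MSG_OverridePcieParameters (the helper name is invented):

#include <stdint.h>

/* Sketch of the per-level clamp applied before messaging the SMU; the
 * ternaries mirror the hunk above. */
static uint32_t pack_pcie_override(uint32_t level, uint32_t gen_speed,
				   uint32_t lane_count, uint32_t gen_cap,
				   uint32_t width_cap)
{
	uint32_t gen  = gen_speed  > gen_cap   ? gen_cap   : gen_speed;
	uint32_t lane = lane_count > width_cap ? width_cap : lane_count;

	/* [23:16] = link level, [15:8] = gen speed, [7:0] = lane count */
	return (level << 16) | (gen << 8) | lane;
}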


@ -2923,8 +2923,13 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
break;
}
if (!ret)
if (!ret) {
/* disable mmio access while doing mode 1 reset */
smu->adev->no_hw_access = true;
/* ensure no_hw_access is globally visible before any MMIO */
smp_mb();
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
}
return ret;
}


@ -2143,10 +2143,15 @@ static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
if (!ret) {
if (amdgpu_emu_mode == 1)
if (amdgpu_emu_mode == 1) {
msleep(50000);
else
} else {
/* disable mmio access while doing mode 1 reset */
smu->adev->no_hw_access = true;
/* ensure no_hw_access is globally visible before any MMIO */
smp_mb();
msleep(1000);
}
}
return ret;


@ -1162,8 +1162,18 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
new_state->self_refresh_active;
}
static void
encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_encoder_bridge_disable - disable bridges and encoder
* @dev: DRM device
* @state: the driver state object
*
* Loops over all connectors in the current state and, if the CRTC needs
* it, disables the whole bridge chain and then disables the encoder.
*/
void
drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@ -1229,9 +1239,18 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_disable);
static void
crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_crtc_disable - disable CRTCs
* @dev: DRM device
* @state: the driver state object
*
* Loops over all CRTCs in the current state and if the CRTC needs
* it, disables it.
*/
void
drm_atomic_helper_commit_crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@ -1282,9 +1301,18 @@ crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
drm_crtc_vblank_put(crtc);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_disable);
static void
encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_encoder_bridge_post_disable - post-disable encoder bridges
* @dev: DRM device
* @state: the driver state object
*
* Loops over all connectors in the current state and if the CRTC needs
* it, post-disables all encoder bridges.
*/
void
drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
@ -1335,15 +1363,16 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_post_disable);
static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
{
encoder_bridge_disable(dev, state);
drm_atomic_helper_commit_encoder_bridge_disable(dev, state);
crtc_disable(dev, state);
drm_atomic_helper_commit_encoder_bridge_post_disable(dev, state);
encoder_bridge_post_disable(dev, state);
drm_atomic_helper_commit_crtc_disable(dev, state);
}
/**
@ -1446,8 +1475,17 @@ void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *stat
}
EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);
static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_crtc_set_mode - set the new mode
* @dev: DRM device
* @state: the driver state object
*
* Loops over all connectors in the current state and, if the mode has
* changed, changes the mode of the CRTC, then calls down the bridge
* chain and changes the mode in all bridges as well.
*/
void
drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
@ -1508,6 +1546,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_set_mode);
/**
* drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
@ -1531,12 +1570,21 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
crtc_set_mode(dev, state);
drm_atomic_helper_commit_crtc_set_mode(dev, state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_writebacks - issue writebacks
* @dev: DRM device
* @state: atomic state object being committed
*
* This loops over the connectors, checks whether the new state requires
* a writeback job to be issued and, in that case, issues an atomic
* commit on that connector.
*/
void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@ -1555,9 +1603,18 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_writebacks);
static void
encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_encoder_bridge_pre_enable - pre-enable bridges
* @dev: DRM device
* @state: atomic state object being committed
*
* This loops over the connectors and if the CRTC needs it, pre-enables
* the entire bridge chain.
*/
void
drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@ -1588,9 +1645,18 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_pre_enable);
static void
crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_crtc_enable - enables the CRTCs
* @dev: DRM device
* @state: atomic state object being committed
*
* This loops over CRTCs in the new state, and if the CRTC needs
* it, enables it.
*/
void
drm_atomic_helper_commit_crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
@ -1619,9 +1685,18 @@ crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
}
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_crtc_enable);
static void
encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
/**
* drm_atomic_helper_commit_encoder_bridge_enable - enables the bridges
* @dev: DRM device
* @state: atomic state object being committed
*
* This loops over all connectors in the new state, and if the CRTC needs
* it, enables the entire bridge chain.
*/
void
drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
@ -1664,6 +1739,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
drm_bridge_put(bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_encoder_bridge_enable);
/**
* drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
@ -1682,11 +1758,11 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *state)
{
encoder_bridge_pre_enable(dev, state);
drm_atomic_helper_commit_crtc_enable(dev, state);
crtc_enable(dev, state);
drm_atomic_helper_commit_encoder_bridge_pre_enable(dev, state);
encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
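With these hunks applied, the generic enable path runs crtc enable, then the bridges' pre-enable and enable, then writebacks, while the generic disable path runs the bridges' disable and post-disable before the crtc disable; drivers with stricter ordering constraints, like tidss further down, can now call the exported pieces in whatever order their hardware requires.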


@ -366,6 +366,9 @@ static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
if (helper->info->state != FBINFO_STATE_RUNNING)
return;
drm_fb_helper_fb_dirty(helper);
}
@ -732,6 +735,13 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
if (fb_helper->info->state != FBINFO_STATE_RUNNING)
return;
/*
* Cancel pending damage work. During GPU reset, VBlank
* interrupts are disabled and drm_fb_helper_fb_dirty()
* would otherwise wait until the vblank timeout expires.
*/
cancel_work_sync(&fb_helper->damage_work);
console_lock();
} else {


@ -1692,7 +1692,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
{
struct hdmi_context *hdata = arg;
mod_delayed_work(system_wq, &hdata->hotplug_work,
mod_delayed_work(system_percpu_wq, &hdata->hotplug_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
return IRQ_HANDLED;
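This appears to be part of the tree-wide move away from the ambiguous system_wq name: system_percpu_wq is the same per-CPU default workqueue under its explicit name, so the queueing behavior of the hotplug debounce work is unchanged.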


@ -1002,12 +1002,6 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
/*
* set flag to request the DSI host bridge be pre-enabled before device bridge
* in the chain, so the DSI host is ready when the device bridge is pre-enabled
*/
dsi->next_bridge->pre_enable_prev_first = true;
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);


@ -30,6 +30,9 @@ ad102_gsp = {
.booter.ctor = ga102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,


@ -337,18 +337,12 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
}
int
nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp)
{
return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb",
NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
}
void
nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
{
nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
}
int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{


@ -47,6 +47,9 @@ ga100_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,


@ -158,6 +158,9 @@ ga102_gsp_r535 = {
.booter.ctor = ga102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,


@ -7,9 +7,8 @@ enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb_init(struct nvkm_gsp *gsp);
struct nvkm_gsp_fwif {
int version;
@ -52,6 +51,11 @@ struct nvkm_gsp_func {
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
struct {
int (*ctor)(struct nvkm_gsp *);
void (*dtor)(struct nvkm_gsp *);
} fwsec_sb;
void (*dtor)(struct nvkm_gsp *);
int (*oneinit)(struct nvkm_gsp *);
int (*init)(struct nvkm_gsp *);
@ -67,6 +71,8 @@ extern const struct nvkm_falcon_func tu102_gsp_flcn;
extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
int tu102_gsp_init(struct nvkm_gsp *);
int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
@ -91,5 +97,18 @@ int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);
static inline int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
{
if (gsp->func->fwsec_sb.ctor)
return gsp->func->fwsec_sb.ctor(gsp);
return 0;
}
static inline void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
{
if (gsp->func->fwsec_sb.dtor)
gsp->func->fwsec_sb.dtor(gsp);
}
extern const struct nvkm_gsp_func gv100_gsp;
#endif


@ -30,6 +30,18 @@
#include <nvfw/fw.h>
#include <nvfw/hs.h>
int
tu102_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
{
return nvkm_gsp_fwsec_sb_init(gsp);
}
void
tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
{
nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
}
static int
tu102_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@ -370,6 +382,9 @@ tu102_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,


@ -30,6 +30,9 @@ tu116_gsp = {
.booter.ctor = tu102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,


@ -295,7 +295,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
variant->name, priv);
if (ret != 0) {
dev_err(dev, "%s failed irq %d\n", __func__, ret);
return ret;
goto dev_put;
}
ret = pl111_modeset_init(drm);


@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
UCHAR clockInfo[] __counted_by(ucNumEntries);
UCHAR clockInfo[] /*__counted_by(ucNumEntries)*/;
}ClockInfoArray;
typedef struct _NonClockInfoArray{


@ -26,9 +26,33 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
tidss_runtime_get(tidss);
drm_atomic_helper_commit_modeset_disables(ddev, old_state);
drm_atomic_helper_commit_planes(ddev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_modeset_enables(ddev, old_state);
/*
* TI's OLDI and DSI encoders need to be set up before the crtc is
* enabled. Thus drm_atomic_helper_commit_modeset_enables() and
* drm_atomic_helper_commit_modeset_disables() cannot be used here, as
* they enable the crtc before bridges' pre-enable, and disable the crtc
* after bridges' post-disable.
*
* Open code the functions here and first call the bridges' pre-enables,
* then crtc enable, then bridges' enable (and vice versa for
* disable).
*/
drm_atomic_helper_commit_encoder_bridge_disable(ddev, old_state);
drm_atomic_helper_commit_crtc_disable(ddev, old_state);
drm_atomic_helper_commit_encoder_bridge_post_disable(ddev, old_state);
drm_atomic_helper_update_legacy_modeset_state(ddev, old_state);
drm_atomic_helper_calc_timestamping_constants(old_state);
drm_atomic_helper_commit_crtc_set_mode(ddev, old_state);
drm_atomic_helper_commit_planes(ddev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_commit_encoder_bridge_pre_enable(ddev, old_state);
drm_atomic_helper_commit_crtc_enable(ddev, old_state);
drm_atomic_helper_commit_encoder_bridge_enable(ddev, old_state);
drm_atomic_helper_commit_writebacks(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);


@ -3,7 +3,7 @@ config NOVA_CORE
depends on 64BIT
depends on PCI
depends on RUST
depends on RUST_FW_LOADER_ABSTRACTIONS
select RUST_FW_LOADER_ABSTRACTIONS
select AUXILIARY_BUS
default n
help


@ -588,21 +588,23 @@ impl Cmdq {
header.length(),
);
let payload_length = header.payload_length();
// Check that the driver read area is large enough for the message.
if slice_1.len() + slice_2.len() < header.length() {
if slice_1.len() + slice_2.len() < payload_length {
return Err(EIO);
}
// Cut the message slices down to the actual length of the message.
let (slice_1, slice_2) = if slice_1.len() > header.length() {
// PANIC: we checked above that `slice_1` is at least as long as `msg_header.length()`.
(slice_1.split_at(header.length()).0, &slice_2[0..0])
let (slice_1, slice_2) = if slice_1.len() > payload_length {
// PANIC: we checked above that `slice_1` is at least as long as `payload_length`.
(slice_1.split_at(payload_length).0, &slice_2[0..0])
} else {
(
slice_1,
// PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
// large as `msg_header.length()`.
slice_2.split_at(header.length() - slice_1.len()).0,
// large as `payload_length`.
slice_2.split_at(payload_length - slice_1.len()).0,
)
};
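The underlying bug: slice_1 and slice_2 cover only the payload that follows the headers, while header.length() (see the GspMsgElement hunk further down) also counts the message and RPC headers, so the old check demanded header-sized extra room and the split consumed too many bytes; payload_length() is the quantity the slices actually need to hold.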


@ -141,8 +141,8 @@ unsafe impl AsBytes for GspFwWprMeta {}
// are valid.
unsafe impl FromBytes for GspFwWprMeta {}
type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
type GspFwWprMetaBootResumeInfo = bindings::GspFwWprMeta__bindgen_ty_1;
type GspFwWprMetaBootInfo = bindings::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
impl GspFwWprMeta {
/// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the
@ -150,8 +150,8 @@ impl GspFwWprMeta {
pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
Self(bindings::GspFwWprMeta {
// CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
magic: r570_144::GSP_FW_WPR_META_MAGIC as u64,
revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION),
magic: bindings::GSP_FW_WPR_META_MAGIC as u64,
revision: u64::from(bindings::GSP_FW_WPR_META_REVISION),
sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
@ -315,19 +315,19 @@ impl From<MsgFunction> for u32 {
#[repr(u32)]
pub(crate) enum SeqBufOpcode {
// Core operation opcodes
CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
CoreReset = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
CoreResume = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
CoreStart = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
CoreWaitForHalt = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
// Delay opcode
DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
DelayUs = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
// Register operation opcodes
RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
RegModify = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
RegPoll = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
RegStore = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
RegWrite = bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
}
impl fmt::Display for SeqBufOpcode {
@ -351,25 +351,25 @@ impl TryFrom<u32> for SeqBufOpcode {
fn try_from(value: u32) -> Result<SeqBufOpcode> {
match value {
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
Ok(SeqBufOpcode::CoreReset)
}
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
Ok(SeqBufOpcode::CoreResume)
}
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
Ok(SeqBufOpcode::CoreStart)
}
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
Ok(SeqBufOpcode::CoreWaitForHalt)
}
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
Ok(SeqBufOpcode::RegModify)
}
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
bindings::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
_ => Err(EINVAL),
}
}
@ -385,7 +385,7 @@ impl From<SeqBufOpcode> for u32 {
/// Wrapper for GSP sequencer register write payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
pub(crate) struct RegWritePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
impl RegWritePayload {
/// Returns the register address.
@ -408,7 +408,7 @@ unsafe impl AsBytes for RegWritePayload {}
/// Wrapper for GSP sequencer register modify payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
pub(crate) struct RegModifyPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
impl RegModifyPayload {
/// Returns the register address.
@ -436,7 +436,7 @@ unsafe impl AsBytes for RegModifyPayload {}
/// Wrapper for GSP sequencer register poll payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
pub(crate) struct RegPollPayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
impl RegPollPayload {
/// Returns the register address.
@ -469,7 +469,7 @@ unsafe impl AsBytes for RegPollPayload {}
/// Wrapper for GSP sequencer delay payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
pub(crate) struct DelayUsPayload(bindings::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
impl DelayUsPayload {
/// Returns the delay value in microseconds.
@ -487,7 +487,7 @@ unsafe impl AsBytes for DelayUsPayload {}
/// Wrapper for GSP sequencer register store payload.
#[repr(transparent)]
#[derive(Copy, Clone)]
pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
pub(crate) struct RegStorePayload(bindings::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
impl RegStorePayload {
/// Returns the register address.
@ -510,7 +510,7 @@ unsafe impl AsBytes for RegStorePayload {}
/// Wrapper for GSP sequencer buffer command.
#[repr(transparent)]
pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD);
pub(crate) struct SequencerBufferCmd(bindings::GSP_SEQUENCER_BUFFER_CMD);
impl SequencerBufferCmd {
/// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid.
@ -612,7 +612,7 @@ unsafe impl AsBytes for SequencerBufferCmd {}
/// Wrapper for GSP run CPU sequencer RPC.
#[repr(transparent)]
pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00);
pub(crate) struct RunCpuSequencer(bindings::rpc_run_cpu_sequencer_v17_00);
impl RunCpuSequencer {
/// Returns the command index.
@ -797,13 +797,6 @@ impl bindings::rpc_message_header_v {
}
}
// SAFETY: We can't derive the Zeroable trait for this binding because the
// procedural macro doesn't support the syntax used by bindgen to create the
// __IncompleteArrayField types. So instead we implement it here, which is safe
// because these are explicitly padded structures only containing types for
// which any bit pattern, including all zeros, is valid.
unsafe impl Zeroable for bindings::rpc_message_header_v {}
/// GSP Message Element.
///
/// This is essentially a message header expected to be followed by the message data.
@ -853,11 +846,16 @@ impl GspMsgElement {
self.inner.checkSum = checksum;
}
/// Returns the total length of the message.
/// Returns the length of the message's payload.
pub(crate) fn payload_length(&self) -> usize {
// `rpc.length` includes the length of the RPC message header.
num::u32_as_usize(self.inner.rpc.length)
.saturating_sub(size_of::<bindings::rpc_message_header_v>())
}
/// Returns the total length of the message, message and RPC headers included.
pub(crate) fn length(&self) -> usize {
// `rpc.length` includes the length of the GspRpcHeader but not the message header.
size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
+ num::u32_as_usize(self.inner.rpc.length)
size_of::<Self>() + self.payload_length()
}
// Returns the sequence number of the message.


@ -24,8 +24,11 @@
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
use kernel::{
ffi,
prelude::Zeroable, //
};
use kernel::ffi;
use pin_init::MaybeZeroable;
include!("r570_144/bindings.rs");
// SAFETY: This type has a size of zero, so its inclusion into another type should not affect that
// type's ability to implement `Zeroable`.
unsafe impl<T> kernel::prelude::Zeroable for __IncompleteArrayField<T> {}


@ -320,11 +320,12 @@ pub const NV_VGPU_MSG_EVENT_RECOVERY_ACTION: _bindgen_ty_3 = 4130;
pub const NV_VGPU_MSG_EVENT_NUM_EVENTS: _bindgen_ty_3 = 4131;
pub type _bindgen_ty_3 = ffi::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub totalVFs: u32_,
pub firstVfOffset: u32_,
pub vfFeatureMask: u32_,
pub __bindgen_padding_0: [u8; 4usize],
pub FirstVFBar0Address: u64_,
pub FirstVFBar1Address: u64_,
pub FirstVFBar2Address: u64_,
@ -340,23 +341,26 @@ pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
pub bClientRmAllocatedCtxBuffer: u8_,
pub bNonPowerOf2ChannelCountSupported: u8_,
pub bVfResizableBAR1Supported: u8_,
pub __bindgen_padding_1: [u8; 7usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
pub BoardID: u32_,
pub chipSKU: [ffi::c_char; 9usize],
pub chipSKUMod: [ffi::c_char; 5usize],
pub __bindgen_padding_0: [u8; 2usize],
pub skuConfigVersion: u32_,
pub project: [ffi::c_char; 5usize],
pub projectSKU: [ffi::c_char; 5usize],
pub CDP: [ffi::c_char; 6usize],
pub projectSKUMod: [ffi::c_char; 2usize],
pub __bindgen_padding_1: [u8; 2usize],
pub businessCycle: u32_,
}
pub type NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG = [u8_; 17usize];
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub base: u64_,
pub limit: u64_,
@ -368,13 +372,14 @@ pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
pub blackList: NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
pub numFBRegions: u32_,
pub __bindgen_padding_0: [u8; 4usize],
pub fbRegion: [NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; 16usize],
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
#[derive(Debug, Copy, Clone, MaybeZeroable)]
pub struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
pub index: u32_,
pub flags: u32_,
@ -391,14 +396,14 @@ impl Default for NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
}
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct DOD_METHOD_DATA {
pub status: u32_,
pub acpiIdListLen: u32_,
pub acpiIdList: [u32_; 16usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct JT_METHOD_DATA {
pub status: u32_,
pub jtCaps: u32_,
@ -407,14 +412,14 @@ pub struct JT_METHOD_DATA {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MUX_METHOD_DATA_ELEMENT {
pub acpiId: u32_,
pub mode: u32_,
pub status: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MUX_METHOD_DATA {
pub tableLen: u32_,
pub acpiIdMuxModeTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
@ -422,13 +427,13 @@ pub struct MUX_METHOD_DATA {
pub acpiIdMuxStateTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct CAPS_METHOD_DATA {
pub status: u32_,
pub optimusCaps: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct ACPI_METHOD_DATA {
pub bValid: u8_,
pub __bindgen_padding_0: [u8; 3usize],
@ -438,20 +443,20 @@ pub struct ACPI_METHOD_DATA {
pub capsMethodData: CAPS_METHOD_DATA,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS {
pub headIndex: u32_,
pub maxHResolution: u32_,
pub maxVResolution: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS {
pub numHeads: u32_,
pub maxNumHeads: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct BUSINFO {
pub deviceID: u16_,
pub vendorID: u16_,
@ -461,7 +466,7 @@ pub struct BUSINFO {
pub __bindgen_padding_0: u8,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_VF_INFO {
pub totalVFs: u32_,
pub firstVFOffset: u32_,
@ -474,34 +479,37 @@ pub struct GSP_VF_INFO {
pub __bindgen_padding_0: [u8; 5usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_PCIE_CONFIG_REG {
pub linkCap: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct EcidManufacturingInfo {
pub ecidLow: u32_,
pub ecidHigh: u32_,
pub ecidExtended: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct FW_WPR_LAYOUT_OFFSET {
pub nonWprHeapOffset: u64_,
pub frtsOffset: u64_,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
#[derive(Debug, Copy, Clone, MaybeZeroable)]
pub struct GspStaticConfigInfo_t {
pub grCapsBits: [u8_; 23usize],
pub __bindgen_padding_0: u8,
pub gidInfo: NV2080_CTRL_GPU_GET_GID_INFO_PARAMS,
pub SKUInfo: NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS,
pub __bindgen_padding_1: [u8; 4usize],
pub fbRegionInfoParams: NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS,
pub sriovCaps: NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS,
pub sriovMaxGfid: u32_,
pub engineCaps: [u32_; 3usize],
pub poisonFuseEnabled: u8_,
pub __bindgen_padding_2: [u8; 7usize],
pub fb_length: u64_,
pub fbio_mask: u64_,
pub fb_bus_width: u32_,
@ -527,16 +535,20 @@ pub struct GspStaticConfigInfo_t {
pub bIsMigSupported: u8_,
pub RTD3GC6TotalBoardPower: u16_,
pub RTD3GC6PerstDelay: u16_,
pub __bindgen_padding_3: [u8; 2usize],
pub bar1PdeBase: u64_,
pub bar2PdeBase: u64_,
pub bVbiosValid: u8_,
pub __bindgen_padding_4: [u8; 3usize],
pub vbiosSubVendor: u32_,
pub vbiosSubDevice: u32_,
pub bPageRetirementSupported: u8_,
pub bSplitVasBetweenServerClientRm: u8_,
pub bClRootportNeedsNosnoopWAR: u8_,
pub __bindgen_padding_5: u8,
pub displaylessMaxHeads: VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS,
pub displaylessMaxResolution: VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS,
pub __bindgen_padding_6: [u8; 4usize],
pub displaylessMaxPixels: u64_,
pub hInternalClient: u32_,
pub hInternalDevice: u32_,
@ -558,7 +570,7 @@ impl Default for GspStaticConfigInfo_t {
}
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspSystemInfo {
pub gpuPhysAddr: u64_,
pub gpuPhysFbAddr: u64_,
@ -615,7 +627,7 @@ pub struct GspSystemInfo {
pub hostPageSize: u64_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub sharedMemPhysAddr: u64_,
pub pageTableEntryCount: u32_,
@ -624,7 +636,7 @@ pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
pub statQueueOffset: u64_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SR_INIT_ARGUMENTS {
pub oldLevel: u32_,
pub flags: u32_,
@ -632,7 +644,7 @@ pub struct GSP_SR_INIT_ARGUMENTS {
pub __bindgen_padding_0: [u8; 3usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_ARGUMENTS_CACHED {
pub messageQueueInitArguments: MESSAGE_QUEUE_INIT_ARGUMENTS,
pub srInitArguments: GSP_SR_INIT_ARGUMENTS,
@ -642,13 +654,13 @@ pub struct GSP_ARGUMENTS_CACHED {
pub profilerArgs: GSP_ARGUMENTS_CACHED__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_ARGUMENTS_CACHED__bindgen_ty_1 {
pub pa: u64_,
pub size: u64_,
}
#[repr(C)]
#[derive(Copy, Clone, Zeroable)]
#[derive(Copy, Clone, MaybeZeroable)]
pub union rpc_message_rpc_union_field_v03_00 {
pub spare: u32_,
pub cpuRmGfid: u32_,
@ -664,6 +676,7 @@ impl Default for rpc_message_rpc_union_field_v03_00 {
}
pub type rpc_message_rpc_union_field_v = rpc_message_rpc_union_field_v03_00;
#[repr(C)]
#[derive(MaybeZeroable)]
pub struct rpc_message_header_v03_00 {
pub header_version: u32_,
pub signature: u32_,
@ -686,7 +699,7 @@ impl Default for rpc_message_header_v03_00 {
}
pub type rpc_message_header_v = rpc_message_header_v03_00;
#[repr(C)]
#[derive(Copy, Clone, Zeroable)]
#[derive(Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta {
pub magic: u64_,
pub revision: u64_,
@ -721,19 +734,19 @@ pub struct GspFwWprMeta {
pub verified: u64_,
}
#[repr(C)]
#[derive(Copy, Clone, Zeroable)]
#[derive(Copy, Clone, MaybeZeroable)]
pub union GspFwWprMeta__bindgen_ty_1 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_1__bindgen_ty_2,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_1 {
pub sysmemAddrOfSignature: u64_,
pub sizeOfSignature: u64_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_2 {
pub gspFwHeapFreeListWprOffset: u32_,
pub unused0: u32_,
@ -749,13 +762,13 @@ impl Default for GspFwWprMeta__bindgen_ty_1 {
}
}
#[repr(C)]
#[derive(Copy, Clone, Zeroable)]
#[derive(Copy, Clone, MaybeZeroable)]
pub union GspFwWprMeta__bindgen_ty_2 {
pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_2__bindgen_ty_1,
pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2__bindgen_ty_2,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub partitionRpcAddr: u64_,
pub partitionRpcRequestOffset: u16_,
@ -767,7 +780,7 @@ pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
pub lsUcodeVersion: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_2 {
pub partitionRpcPadding: [u32_; 4usize],
pub sysmemAddrOfCrashReportQueue: u64_,
@ -802,7 +815,7 @@ pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM: LibosMemoryRegion
pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_FB: LibosMemoryRegionLoc = 2;
pub type LibosMemoryRegionLoc = ffi::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct LibosMemoryRegionInitArgument {
pub id8: LibosAddress,
pub pa: LibosAddress,
@ -812,7 +825,7 @@ pub struct LibosMemoryRegionInitArgument {
pub __bindgen_padding_0: [u8; 6usize],
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct PACKED_REGISTRY_ENTRY {
pub nameOffset: u32_,
pub type_: u8_,
@ -821,14 +834,14 @@ pub struct PACKED_REGISTRY_ENTRY {
pub length: u32_,
}
#[repr(C)]
#[derive(Debug, Default)]
#[derive(Debug, Default, MaybeZeroable)]
pub struct PACKED_REGISTRY_TABLE {
pub size: u32_,
pub numEntries: u32_,
pub entries: __IncompleteArrayField<PACKED_REGISTRY_ENTRY>,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct msgqTxHeader {
pub version: u32_,
pub size: u32_,
@ -840,13 +853,13 @@ pub struct msgqTxHeader {
pub entryOff: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Zeroable)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct msgqRxHeader {
pub readPtr: u32_,
}
#[repr(C)]
#[repr(align(8))]
#[derive(Zeroable)]
#[derive(MaybeZeroable)]
pub struct GSP_MSG_QUEUE_ELEMENT {
pub authTagBuffer: [u8_; 16usize],
pub aadBuffer: [u8_; 16usize],
@ -866,7 +879,7 @@ impl Default for GSP_MSG_QUEUE_ELEMENT {
}
}
#[repr(C)]
#[derive(Debug, Default)]
#[derive(Debug, Default, MaybeZeroable)]
pub struct rpc_run_cpu_sequencer_v17_00 {
pub bufferSizeDWord: u32_,
pub cmdIndex: u32_,
@ -884,20 +897,20 @@ pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: GSP_SEQ_BUF_
pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME: GSP_SEQ_BUF_OPCODE = 8;
pub type GSP_SEQ_BUF_OPCODE = ffi::c_uint;
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_WRITE {
pub addr: u32_,
pub val: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_MODIFY {
pub addr: u32_,
pub mask: u32_,
pub val: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub addr: u32_,
pub mask: u32_,
@ -906,24 +919,24 @@ pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
pub error: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_DELAY_US {
pub val: u32_,
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
#[derive(Debug, Default, Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQ_BUF_PAYLOAD_REG_STORE {
pub addr: u32_,
pub index: u32_,
}
#[repr(C)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, MaybeZeroable)]
pub struct GSP_SEQUENCER_BUFFER_CMD {
pub opCode: GSP_SEQ_BUF_OPCODE,
pub payload: GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1,
}
#[repr(C)]
#[derive(Copy, Clone)]
#[derive(Copy, Clone, MaybeZeroable)]
pub union GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 {
pub regWrite: GSP_SEQ_BUF_PAYLOAD_REG_WRITE,
pub regModify: GSP_SEQ_BUF_PAYLOAD_REG_MODIFY,


@ -315,12 +315,12 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
fd_prepare_file(fdf)->private_data = req;
*alloc_fd = fd_publish(fdf);
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
atomic_inc_return(&mdev->request_id), *alloc_fd);
atomic_inc_return(&mdev->request_id), fd_prepare_fd(fdf));
dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
*alloc_fd = fd_publish(fdf);
return 0;
err_free_req:


@ -37,7 +37,6 @@
#define PCIE_CFG_STATUS17 0x44
#define PM_CURRENT_STATE(x) (((x) >> 7) & 0x1)
#define WAIT_LINKUP_TIMEOUT 4000
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
@ -350,40 +349,10 @@ static struct pci_ops meson_pci_ops = {
static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
u32 speed_okay = 0;
u32 cnt = 0;
u32 state12, state17, smlh_up, ltssm_up, rdlh_up;
u32 state12;
do {
state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
state17 = meson_cfg_readl(mp, PCIE_CFG_STATUS17);
smlh_up = IS_SMLH_LINK_UP(state12);
rdlh_up = IS_RDLH_LINK_UP(state12);
ltssm_up = IS_LTSSM_UP(state12);
if (PM_CURRENT_STATE(state17) < PCIE_GEN3)
speed_okay = 1;
if (smlh_up)
dev_dbg(dev, "smlh_link_up is on\n");
if (rdlh_up)
dev_dbg(dev, "rdlh_link_up is on\n");
if (ltssm_up)
dev_dbg(dev, "ltssm_up is on\n");
if (speed_okay)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
return true;
cnt++;
udelay(10);
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
return false;
state12 = meson_cfg_readl(mp, PCIE_CFG_STATUS12);
return IS_SMLH_LINK_UP(state12) && IS_RDLH_LINK_UP(state12);
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)


@ -1047,7 +1047,6 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
pcie->parf + PARF_NO_SNOOP_OVERRIDE);
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
return 0;
@ -1316,6 +1315,8 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
goto err_disable_phy;
}
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_ep_reset_deassert(pcie);
if (pcie->cfg->ops->config_sid) {
@ -1464,6 +1465,7 @@ static const struct qcom_pcie_cfg cfg_2_1_0 = {
static const struct qcom_pcie_cfg cfg_2_3_2 = {
.ops = &ops_2_3_2,
.no_l0s = true,
};
static const struct qcom_pcie_cfg cfg_2_3_3 = {


@ -652,13 +652,6 @@ static bool vga_is_boot_device(struct vga_device *vgadev)
return true;
}
/*
* Vgadev has neither IO nor MEM enabled. If we haven't found any
* other VGA devices, it is the best candidate so far.
*/
if (!boot_vga)
return true;
return false;
}


@ -89,11 +89,11 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev,
int *trigger_out,
int *polarity_out)
{
int gsi;
u32 gsi;
u8 pin;
struct acpi_prt_entry *entry;
int trigger = ACPI_LEVEL_SENSITIVE;
int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
int ret, polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
if (!dev || !gsi_out || !trigger_out || !polarity_out)
@ -105,17 +105,18 @@ int xen_acpi_get_gsi_info(struct pci_dev *dev,
entry = acpi_pci_irq_lookup(dev, pin);
if (entry) {
ret = 0;
if (entry->link)
gsi = acpi_pci_link_allocate_irq(entry->link,
ret = acpi_pci_link_allocate_irq(entry->link,
entry->index,
&trigger, &polarity,
NULL);
NULL, &gsi);
else
gsi = entry->index;
} else
gsi = -1;
ret = -ENODEV;
if (gsi < 0)
if (ret < 0)
return -EINVAL;
*gsi_out = gsi;


@ -2255,6 +2255,7 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
BTRFS_DATA_RELOC_TREE_OBJECTID, true);
if (IS_ERR(root)) {
if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
ret = PTR_ERR(root);
goto out;
}


@ -481,13 +481,15 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
ASSERT(size <= sectorsize);
/*
* The compressed size also needs to be no larger than a sector.
* That's also why we only need one page as the parameter.
* The compressed size also needs to be no larger than a page.
* That's also why we only need one folio as the parameter.
*/
if (compressed_folio)
if (compressed_folio) {
ASSERT(compressed_size <= sectorsize);
else
ASSERT(compressed_size <= PAGE_SIZE);
} else {
ASSERT(compressed_size == 0);
}
if (compressed_size && compressed_folio)
cur_size = compressed_size;
@ -574,6 +576,18 @@ static bool can_cow_file_range_inline(struct btrfs_inode *inode,
if (offset != 0)
return false;
/*
* Even for bs > ps cases, cow_file_range_inline() can only accept a
* single folio.
*
* This can be problematic and cause access beyond the page boundary if a
* page-sized folio is passed into that function, and encoded writes do
* exactly that.
* So limit the inlined extent size to PAGE_SIZE here.
*/
if (size > PAGE_SIZE || compressed_size > PAGE_SIZE)
return false;
/* Inline extents are limited to sectorsize. */
if (size > fs_info->sectorsize)
return false;
@ -4034,11 +4048,6 @@ static int btrfs_read_locked_inode(struct btrfs_inode *inode, struct btrfs_path
btrfs_set_inode_mapping_order(inode);
cache_index:
ret = btrfs_init_file_extent_tree(inode);
if (ret)
goto out;
btrfs_inode_set_file_extent_range(inode, 0,
round_up(i_size_read(vfs_inode), fs_info->sectorsize));
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
@ -4125,6 +4134,20 @@ cache_acl:
btrfs_ino(inode), btrfs_root_id(root), ret);
}
/*
* We don't need the path anymore, so release it to avoid holding a read
* lock on a leaf while calling btrfs_init_file_extent_tree(), which can
* allocate memory that triggers reclaim (GFP_KERNEL) and cause a locking
* dependency.
*/
btrfs_release_path(path);
ret = btrfs_init_file_extent_tree(inode);
if (ret)
goto out;
btrfs_inode_set_file_extent_range(inode, 0,
round_up(i_size_read(vfs_inode), fs_info->sectorsize));
if (!maybe_acls)
cache_no_acl(vfs_inode);


@ -736,14 +736,12 @@ bool btrfs_check_options(const struct btrfs_fs_info *info,
*/
void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info)
{
if (fs_info->sectorsize < PAGE_SIZE) {
if (fs_info->sectorsize != PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
btrfs_info(fs_info,
"forcing free space tree for sector size %u with page size %lu",
fs_info->sectorsize, PAGE_SIZE);
btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info,
"forcing free space tree for sector size %u with page size %lu",
fs_info->sectorsize, PAGE_SIZE);
btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
}
btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE);
}
/*


@ -190,7 +190,7 @@ static void do_abort_log_replay(struct walk_control *wc, const char *function,
btrfs_abort_transaction(wc->trans, error);
if (wc->subvol_path->nodes[0]) {
if (wc->subvol_path && wc->subvol_path->nodes[0]) {
btrfs_crit(fs_info,
"subvolume (root %llu) leaf currently being processed:",
btrfs_root_id(wc->root));


@ -533,6 +533,7 @@ static struct dentry *ecryptfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
fsstack_copy_inode_size(dir, lower_dir);
set_nlink(dir, lower_dir->i_nlink);
out:
dput(lower_dir_dentry);
end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
@ -584,7 +585,7 @@ ecryptfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
fsstack_copy_attr_times(dir, lower_dir);
fsstack_copy_inode_size(dir, lower_dir);
out:
end_removing(lower_dentry);
end_creating(lower_dentry);
if (d_really_is_negative(dentry))
d_drop(dentry);
return rc;


@ -1593,6 +1593,9 @@ EXPORT_SYMBOL(igrab);
* @hashval: hash value (usually inode number) to search for
* @test: callback used for comparisons between inodes
* @data: opaque data pointer to pass to @test
* @isnew: return argument telling whether I_NEW was set when
* the inode was found in the hash (the caller needs to
* wait for I_NEW to clear)
*
* Search for the inode specified by @hashval and @data in the inode cache.
* If the inode is in the cache, the inode is returned with an incremented


@ -832,7 +832,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter,
if (!mapping_large_folio_support(iter->inode->i_mapping))
len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
if (iter->fbatch) {
if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
struct folio *folio = folio_batch_next(iter->fbatch);
if (!folio)
@ -929,7 +929,7 @@ static int iomap_write_begin(struct iomap_iter *iter,
* process so return and let the caller iterate and refill the batch.
*/
if (!folio) {
WARN_ON_ONCE(!iter->fbatch);
WARN_ON_ONCE(!(iter->iomap.flags & IOMAP_F_FOLIO_BATCH));
return 0;
}
@ -1544,23 +1544,39 @@ static int iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
return status;
}
loff_t
/**
* iomap_fill_dirty_folios - fill a folio batch with dirty folios
* @iter: Iteration structure
* @start: Start offset of range. Updated based on lookup progress.
* @end: End offset of range
* @iomap_flags: Flags to set on the associated iomap to track the batch.
*
* Returns the folio count directly. Also returns the associated control flag if
* the batch lookup is performed, and the expected offset of a subsequent
* lookup, via out params. The caller is responsible for setting the flag on the
* associated iomap.
*/
unsigned int
iomap_fill_dirty_folios(
struct iomap_iter *iter,
loff_t offset,
loff_t length)
loff_t *start,
loff_t end,
unsigned int *iomap_flags)
{
struct address_space *mapping = iter->inode->i_mapping;
pgoff_t start = offset >> PAGE_SHIFT;
pgoff_t end = (offset + length - 1) >> PAGE_SHIFT;
pgoff_t pstart = *start >> PAGE_SHIFT;
pgoff_t pend = (end - 1) >> PAGE_SHIFT;
unsigned int count;
iter->fbatch = kmalloc(sizeof(struct folio_batch), GFP_KERNEL);
if (!iter->fbatch)
return offset + length;
folio_batch_init(iter->fbatch);
if (!iter->fbatch) {
*start = end;
return 0;
}
filemap_get_folios_dirty(mapping, &start, end, iter->fbatch);
return (start << PAGE_SHIFT);
count = filemap_get_folios_dirty(mapping, &pstart, pend, iter->fbatch);
*start = (pstart << PAGE_SHIFT);
*iomap_flags |= IOMAP_F_FOLIO_BATCH;
return count;
}
EXPORT_SYMBOL_GPL(iomap_fill_dirty_folios);
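For clarity, here is a minimal sketch of the updated calling convention, modeled on the xfs caller further down in this page; pos, count and iter are placeholders for the range being mapped and the active iteration, not kernel API:

	/* Sketch only: pos, count and iter are placeholders. */
	loff_t start = pos;
	loff_t end = pos + count;
	unsigned int iomap_flags = 0;

	iomap_fill_dirty_folios(iter, &start, end, &iomap_flags);
	/* start now points past the last dirty folio found; the caller
	 * trims its mapping to start and applies IOMAP_F_FOLIO_BATCH from
	 * iomap_flags to the iomap it returns. */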
@ -1569,17 +1585,21 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private)
{
struct folio_batch fbatch;
struct iomap_iter iter = {
.inode = inode,
.pos = pos,
.len = len,
.flags = IOMAP_ZERO,
.private = private,
.fbatch = &fbatch,
};
struct address_space *mapping = inode->i_mapping;
int ret;
bool range_dirty;
folio_batch_init(&fbatch);
/*
* To avoid an unconditional flush, check pagecache state and only flush
* if dirty and the fs returns a mapping that might convert on
@ -1590,11 +1610,11 @@ iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
while ((ret = iomap_iter(&iter, ops)) > 0) {
const struct iomap *srcmap = iomap_iter_srcmap(&iter);
if (WARN_ON_ONCE(iter.fbatch &&
if (WARN_ON_ONCE((iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
srcmap->type != IOMAP_UNWRITTEN))
return -EIO;
if (!iter.fbatch &&
if (!(iter.iomap.flags & IOMAP_F_FOLIO_BATCH) &&
(srcmap->type == IOMAP_HOLE ||
srcmap->type == IOMAP_UNWRITTEN)) {
s64 status;


@ -8,10 +8,10 @@
static inline void iomap_iter_reset_iomap(struct iomap_iter *iter)
{
if (iter->fbatch) {
if (iter->iomap.flags & IOMAP_F_FOLIO_BATCH) {
folio_batch_release(iter->fbatch);
kfree(iter->fbatch);
iter->fbatch = NULL;
folio_batch_reinit(iter->fbatch);
iter->iomap.flags &= ~IOMAP_F_FOLIO_BATCH;
}
iter->status = 0;


@ -369,10 +369,19 @@ locks_dispose_list(struct list_head *dispose)
while (!list_empty(dispose)) {
flc = list_first_entry(dispose, struct file_lock_core, flc_list);
list_del_init(&flc->flc_list);
if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
locks_free_lease(file_lease(flc));
else
locks_free_lock(file_lock(flc));
locks_free_lock(file_lock(flc));
}
}
static void
lease_dispose_list(struct list_head *dispose)
{
struct file_lock_core *flc;
while (!list_empty(dispose)) {
flc = list_first_entry(dispose, struct file_lock_core, flc_list);
list_del_init(&flc->flc_list);
locks_free_lease(file_lease(flc));
}
}
@ -576,10 +585,50 @@ lease_setup(struct file_lease *fl, void **priv)
__f_setown(filp, task_pid(current), PIDTYPE_TGID, 0);
}
/**
* lease_open_conflict - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
*
* Check to see if there's an existing open fd on this file that would
* conflict with the lease we're trying to set.
*/
static int
lease_open_conflict(struct file *filp, const int arg)
{
struct inode *inode = file_inode(filp);
int self_wcount = 0, self_rcount = 0;
if (arg == F_RDLCK)
return inode_is_open_for_write(inode) ? -EAGAIN : 0;
else if (arg != F_WRLCK)
return 0;
/*
* Make sure that the only read/write counts are from the lease requestor.
* Note that this will result in denying write leases when i_writecount
* is negative, which is what we want. (We shouldn't grant write leases
* on files open for execution.)
*/
if (filp->f_mode & FMODE_WRITE)
self_wcount = 1;
else if (filp->f_mode & FMODE_READ)
self_rcount = 1;
if (atomic_read(&inode->i_writecount) != self_wcount ||
atomic_read(&inode->i_readcount) != self_rcount)
return -EAGAIN;
return 0;
}
static const struct lease_manager_operations lease_manager_ops = {
.lm_break = lease_break_callback,
.lm_change = lease_modify,
.lm_setup = lease_setup,
.lm_open_conflict = lease_open_conflict,
};
/*
@ -1620,7 +1669,7 @@ restart:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
lease_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->c.flc_wait,
list_empty(&new_fl->c.flc_blocked_member),
break_time);
@ -1643,7 +1692,7 @@ restart:
out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
lease_dispose_list(&dispose);
free_lock:
locks_free_lease(new_fl);
return error;
@ -1727,7 +1776,7 @@ static int __fcntl_getlease(struct file *filp, unsigned int flavor)
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
lease_dispose_list(&dispose);
}
return type;
}
@ -1745,52 +1794,6 @@ int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
return 0;
}
/**
* check_conflicting_open - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
* @flags: current lock flags
*
* Check to see if there's an existing open fd on this file that would
* conflict with the lease we're trying to set.
*/
static int
check_conflicting_open(struct file *filp, const int arg, int flags)
{
struct inode *inode = file_inode(filp);
int self_wcount = 0, self_rcount = 0;
if (flags & FL_LAYOUT)
return 0;
if (flags & FL_DELEG)
/* We leave these checks to the caller */
return 0;
if (arg == F_RDLCK)
return inode_is_open_for_write(inode) ? -EAGAIN : 0;
else if (arg != F_WRLCK)
return 0;
/*
* Make sure that only read/write count is from lease requestor.
* Note that this will result in denying write leases when i_writecount
* is negative, which is what we want. (We shouldn't grant write leases
* on files open for execution.)
*/
if (filp->f_mode & FMODE_WRITE)
self_wcount = 1;
else if (filp->f_mode & FMODE_READ)
self_rcount = 1;
if (atomic_read(&inode->i_writecount) != self_wcount ||
atomic_read(&inode->i_readcount) != self_rcount)
return -EAGAIN;
return 0;
}
static int
generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv)
{
@ -1827,7 +1830,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = check_conflicting_open(filp, arg, lease->c.flc_flags);
error = lease->fl_lmops->lm_open_conflict(filp, arg);
if (error)
goto out;
@ -1884,7 +1887,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **pr
* precedes these checks.
*/
smp_mb();
error = check_conflicting_open(filp, arg, lease->c.flc_flags);
error = lease->fl_lmops->lm_open_conflict(filp, arg);
if (error) {
locks_unlink_lock_ctx(&lease->c);
goto out;
@ -1896,7 +1899,7 @@ out_setup:
out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
lease_dispose_list(&dispose);
if (is_deleg)
inode_unlock(inode);
if (!error && !my_fl)
@ -1932,7 +1935,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
lease_dispose_list(&dispose);
return error;
}
@ -2735,7 +2738,7 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
lease_dispose_list(&dispose);
}
/*


@ -830,11 +830,9 @@ static inline bool legitimize_path(struct nameidata *nd,
static bool legitimize_links(struct nameidata *nd)
{
int i;
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
return false;
}
VFS_BUG_ON(nd->flags & LOOKUP_CACHED);
for (i = 0; i < nd->depth; i++) {
struct saved *last = nd->stack + i;
if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
@ -883,6 +881,11 @@ static bool try_to_unlazy(struct nameidata *nd)
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
goto out1;
}
if (unlikely(nd->depth && !legitimize_links(nd)))
goto out1;
if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
@ -918,6 +921,11 @@ static bool try_to_unlazy_next(struct nameidata *nd, struct dentry *dentry)
int res;
BUG_ON(!(nd->flags & LOOKUP_RCU));
if (unlikely(nd->flags & LOOKUP_CACHED)) {
drop_links(nd);
nd->depth = 0;
goto out2;
}
if (unlikely(nd->depth && !legitimize_links(nd)))
goto out2;
res = __legitimize_mnt(nd->path.mnt, nd->m_seq);
@ -2836,10 +2844,11 @@ static int filename_parentat(int dfd, struct filename *name,
}
/**
* start_dirop - begin a create or remove dirop, performing locking and lookup
* __start_dirop - begin a create or remove dirop, performing locking and lookup
* @parent: the dentry of the parent in which the operation will occur
* @name: a qstr holding the name within that parent
* @lookup_flags: intent and other lookup flags.
* @state: task state bitmask
*
* The lookup is performed and necessary locks are taken so that, on success,
* the returned dentry can be operated on safely.


@ -137,7 +137,7 @@ static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
rreq->front_folio_order = order;
fsize = PAGE_SIZE << order;
fpos = folio_pos(folio);
fend = umin(fpos + fsize, rreq->i_size);
fend = fpos + fsize;
trace_netfs_collect_folio(rreq, folio, fend, collected_to);


@ -764,9 +764,28 @@ nfsd4_layout_lm_change(struct file_lease *onlist, int arg,
return lease_modify(onlist, arg, dispose);
}
/**
* nfsd4_layout_lm_open_conflict - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
*
* The kernel will call into this operation to determine whether there
* are conflicting opens that may prevent the layout from being granted.
* For nfsd, that check is done at a higher level, so this trivially
* returns 0.
*/
static int
nfsd4_layout_lm_open_conflict(struct file *filp, int arg)
{
return 0;
}
static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
.lm_break = nfsd4_layout_lm_break,
.lm_change = nfsd4_layout_lm_change,
.lm_break = nfsd4_layout_lm_break,
.lm_change = nfsd4_layout_lm_change,
.lm_open_conflict = nfsd4_layout_lm_open_conflict,
};
int


@ -5555,10 +5555,29 @@ nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
return -EAGAIN;
}
/**
* nfsd4_deleg_lm_open_conflict - see if the given file points to an inode that has
* an existing open that would conflict with the
* desired lease.
* @filp: file to check
* @arg: type of lease that we're trying to acquire
*
* The kernel will call into this operation to determine whether there
* are conflicting opens that may prevent the deleg from being granted.
* For nfsd, that check is done at a higher level, so this trivially
* returns 0.
*/
static int
nfsd4_deleg_lm_open_conflict(struct file *filp, int arg)
{
return 0;
}
static const struct lease_manager_operations nfsd_lease_mng_ops = {
.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
.lm_open_conflict = nfsd4_deleg_lm_open_conflict,
};
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)


@ -517,14 +517,18 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
/* Namespaces that hang off nsproxy. */
case PIDFD_GET_CGROUP_NAMESPACE:
#ifdef CONFIG_CGROUPS
if (!ns_ref_get(nsp->cgroup_ns))
break;
ns_common = to_ns_common(nsp->cgroup_ns);
#endif
break;
case PIDFD_GET_IPC_NAMESPACE:
#ifdef CONFIG_IPC_NS
if (!ns_ref_get(nsp->ipc_ns))
break;
ns_common = to_ns_common(nsp->ipc_ns);
#endif
break;
case PIDFD_GET_MNT_NAMESPACE:
if (!ns_ref_get(nsp->mnt_ns))
@ -532,32 +536,43 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ns_common = to_ns_common(nsp->mnt_ns);
break;
case PIDFD_GET_NET_NAMESPACE:
#ifdef CONFIG_NET_NS
if (!ns_ref_get(nsp->net_ns))
break;
ns_common = to_ns_common(nsp->net_ns);
#endif
break;
case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE:
#ifdef CONFIG_PID_NS
if (!ns_ref_get(nsp->pid_ns_for_children))
break;
ns_common = to_ns_common(nsp->pid_ns_for_children);
#endif
break;
case PIDFD_GET_TIME_NAMESPACE:
#ifdef CONFIG_TIME_NS
if (!ns_ref_get(nsp->time_ns))
break;
ns_common = to_ns_common(nsp->time_ns);
#endif
break;
case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE:
#ifdef CONFIG_TIME_NS
if (!ns_ref_get(nsp->time_ns_for_children))
break;
ns_common = to_ns_common(nsp->time_ns_for_children);
#endif
break;
case PIDFD_GET_UTS_NAMESPACE:
#ifdef CONFIG_UTS_NS
if (!ns_ref_get(nsp->uts_ns))
break;
ns_common = to_ns_common(nsp->uts_ns);
#endif
break;
/* Namespaces that don't hang off nsproxy. */
case PIDFD_GET_USER_NAMESPACE:
#ifdef CONFIG_USER_NS
scoped_guard(rcu) {
struct user_namespace *user_ns;
@ -566,8 +581,10 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
ns_common = to_ns_common(user_ns);
}
#endif
break;
case PIDFD_GET_PID_NAMESPACE:
#ifdef CONFIG_PID_NS
scoped_guard(rcu) {
struct pid_namespace *pid_ns;
@ -576,6 +593,7 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
ns_common = to_ns_common(pid_ns);
}
#endif
break;
default:
return -ENOIOCTLCMD;
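For reference, a hedged userspace sketch of these ioctls; PIDFD_GET_NET_NAMESPACE returns a new namespace file descriptor on success, and the exact error returned when the namespace type is compiled out depends on how the NULL ns_common is handled later in this function:

	#include <sys/ioctl.h>
	#include <linux/pidfd.h>

	/* pidfd is an open pidfd; the returned fd is suitable for setns(2). */
	int nsfd = ioctl(pidfd, PIDFD_GET_NET_NAMESPACE, 0);
	if (nsfd < 0)
		perror("PIDFD_GET_NET_NAMESPACE"); /* e.g. kernel lacks CONFIG_NET_NS */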


@ -1831,7 +1831,6 @@ xfs_buffered_write_iomap_begin(
*/
if (flags & IOMAP_ZERO) {
xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
u64 end;
if (isnullstartblock(imap.br_startblock) &&
offset_fsb >= eof_fsb)
@ -1851,12 +1850,14 @@ xfs_buffered_write_iomap_begin(
*/
if (imap.br_state == XFS_EXT_UNWRITTEN &&
offset_fsb < eof_fsb) {
loff_t len = min(count,
XFS_FSB_TO_B(mp, imap.br_blockcount));
loff_t foffset = offset, fend;
end = iomap_fill_dirty_folios(iter, offset, len);
fend = offset +
min(count, XFS_FSB_TO_B(mp, imap.br_blockcount));
iomap_fill_dirty_folios(iter, &foffset, fend,
&iomap_flags);
end_fsb = min_t(xfs_fileoff_t, end_fsb,
XFS_B_TO_FSB(mp, end));
XFS_B_TO_FSB(mp, foffset));
}
xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);


@ -51,7 +51,7 @@
int acpi_irq_penalty_init(void);
int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
int *polarity, char **name);
int *polarity, char **name, u32 *gsi);
int acpi_pci_link_free_irq(acpi_handle handle);
/* ACPI PCI Device Binding */
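A short sketch of the new contract, based on the xen caller above; link and index are placeholders, and on failure no GSI is handed back:

	int trigger = ACPI_LEVEL_SENSITIVE;
	int polarity = ACPI_ACTIVE_LOW;
	u32 gsi;
	int ret;

	ret = acpi_pci_link_allocate_irq(link, index, &trigger, &polarity,
					 NULL, &gsi);
	if (ret < 0)
		return ret;	/* allocation failed, gsi is not valid */
	/* On success, gsi holds the allocated GSI and trigger/polarity
	 * reflect the programmed values. */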


@ -60,6 +60,12 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state);
void drm_atomic_helper_commit_encoder_bridge_disable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_crtc_disable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_encoder_bridge_post_disable(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
@ -89,8 +95,24 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
void
drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state);
void drm_atomic_helper_commit_crtc_set_mode(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_encoder_bridge_pre_enable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_crtc_enable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_encoder_bridge_enable(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state);


@ -176,33 +176,17 @@ struct drm_bridge_funcs {
/**
* @disable:
*
* The @disable callback should disable the bridge.
* This callback should disable the bridge. It is called right before
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @disable vfunc. If the preceding element is a &drm_encoder
* it's called right before the &drm_encoder_helper_funcs.disable,
* &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
* hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is disabled via one of:
*
* - &drm_bridge_funcs.disable
* - &drm_bridge_funcs.atomic_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms.
*
* The @disable callback is optional.
*
* NOTE:
@ -215,34 +199,17 @@ struct drm_bridge_funcs {
/**
* @post_disable:
*
* This callback should disable the bridge. It is called right after the
* preceding element in the display pipe is disabled. If the preceding
* element is a bridge this means it's called after that bridge's
* @post_disable function. If the preceding element is a &drm_encoder
* it's called right after the encoder's
* &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
* or &drm_encoder_helper_funcs.dpms hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding this bridge is no longer running when the
* @post_disable is called.
*
* This callback should perform all the actions required by the hardware
* after it has stopped receiving signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is post-disabled (unless marked otherwise by the
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.post_disable
* - &drm_bridge_funcs.atomic_post_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms
* signals) feeding it is no longer running when this callback is
* called.
*
* The @post_disable callback is optional.
*
@ -285,30 +252,18 @@ struct drm_bridge_funcs {
/**
* @pre_enable:
*
* This callback should enable the bridge. It is called right before
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @pre_enable function. If the preceding element is a
* &drm_encoder it's called right before the encoder's
* &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
* &drm_encoder_helper_funcs.dpms hook.
*
* The display pipe (i.e. clocks and timing signals) feeding this bridge
* will not yet be running when the @pre_enable is called.
*
* This callback should perform all the necessary actions to prepare the
* bridge to accept signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is pre-enabled (unless marked otherwise by
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.pre_enable
* - &drm_bridge_funcs.atomic_pre_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
* - &drm_encoder_helper_funcs.commit
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
* The @pre_enable callback is optional.
*
@ -322,31 +277,19 @@ struct drm_bridge_funcs {
/**
* @enable:
*
* The @enable callback should enable the bridge.
* This callback should enable the bridge. It is called right after
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called after that
* bridge's @enable function. If the preceding element is a
* &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
* &drm_encoder_helper_funcs.dpms hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is enabled via one of:
*
* - &drm_bridge_funcs.enable
* - &drm_bridge_funcs.atomic_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
* - drm_encoder_helper_funcs.commit
*
* The @enable callback is optional.
*
* NOTE:
@ -359,30 +302,17 @@ struct drm_bridge_funcs {
/**
* @atomic_pre_enable:
*
* This callback should enable the bridge. It is called right before
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @atomic_pre_enable or @pre_enable function. If the preceding
* element is a &drm_encoder it's called right before the encoder's
* &drm_encoder_helper_funcs.atomic_enable hook.
*
* The display pipe (i.e. clocks and timing signals) feeding this bridge
* will not yet be running when the @atomic_pre_enable is called.
*
* This callback should perform all the necessary actions to prepare the
* bridge to accept signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is pre-enabled (unless marked otherwise by
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.pre_enable
* - &drm_bridge_funcs.atomic_pre_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
* - &drm_encoder_helper_funcs.commit
* will not yet be running when this callback is called. The bridge must
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
* The @atomic_pre_enable callback is optional.
*/
@ -392,31 +322,18 @@ struct drm_bridge_funcs {
/**
* @atomic_enable:
*
* The @atomic_enable callback should enable the bridge.
* This callback should enable the bridge. It is called right after
* the preceding element in the display pipe is enabled. If the
* preceding element is a bridge this means it's called after that
* bridge's @atomic_enable or @enable function. If the preceding element
* is a &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.atomic_enable hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is enabled via one of:
*
* - &drm_bridge_funcs.enable
* - &drm_bridge_funcs.atomic_enable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the CRTC is enabled via one of:
*
* - &drm_crtc_helper_funcs.atomic_enable
* - &drm_crtc_helper_funcs.commit
*
* and the encoder is enabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_enable
* - &drm_encoder_helper_funcs.enable
* - drm_encoder_helper_funcs.commit
*
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@ -424,32 +341,16 @@ struct drm_bridge_funcs {
/**
* @atomic_disable:
*
* The @atomic_disable callback should disable the bridge.
* This callback should disable the bridge. It is called right before
* the preceding element in the display pipe is disabled. If the
* preceding element is a bridge this means it's called before that
* bridge's @atomic_disable or @disable vfunc. If the preceding element
* is a &drm_encoder it's called right before the
* &drm_encoder_helper_funcs.atomic_disable hook.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
* If the preceding element is a &drm_bridge, then this is called before
* that bridge is disabled via one of:
*
* - &drm_bridge_funcs.disable
* - &drm_bridge_funcs.atomic_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called before the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms.
*
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@ -458,34 +359,16 @@ struct drm_bridge_funcs {
/**
* @atomic_post_disable:
*
* This callback should disable the bridge. It is called right after the
* preceding element in the display pipe is disabled. If the preceding
* element is a bridge this means it's called after that bridge's
* @atomic_post_disable or @post_disable function. If the preceding
* element is a &drm_encoder it's called right after the encoder's
* &drm_encoder_helper_funcs.atomic_disable hook.
*
* The bridge must assume that the display pipe (i.e. clocks and timing
* signals) feeding this bridge is no longer running when the
* @atomic_post_disable is called.
*
* This callback should perform all the actions required by the hardware
* after it has stopped receiving signals from the preceding element.
*
* If the preceding element is a &drm_bridge, then this is called after
* that bridge is post-disabled (unless marked otherwise by the
* @pre_enable_prev_first flag) via one of:
*
* - &drm_bridge_funcs.post_disable
* - &drm_bridge_funcs.atomic_post_disable
*
* If the preceding element of the bridge is a display controller, then
* this callback is called after the encoder is disabled via one of:
*
* - &drm_encoder_helper_funcs.atomic_disable
* - &drm_encoder_helper_funcs.prepare
* - &drm_encoder_helper_funcs.disable
* - &drm_encoder_helper_funcs.dpms
*
* and the CRTC is disabled via one of:
*
* - &drm_crtc_helper_funcs.prepare
* - &drm_crtc_helper_funcs.atomic_disable
* - &drm_crtc_helper_funcs.disable
* - &drm_crtc_helper_funcs.dpms
* signals) feeding it is no longer running when this callback is
* called.
*
* The @atomic_post_disable callback is optional.
*/
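To make the reworked hook semantics concrete, here is a minimal sketch of a bridge wiring up the four atomic hooks. The my_bridge_* names are hypothetical, and the second parameter is assumed to be struct drm_atomic_state, matching the atomic_enable/atomic_disable prototypes shown above (older kernels passed a struct drm_bridge_state here):

	static void my_bridge_atomic_pre_enable(struct drm_bridge *bridge,
						struct drm_atomic_state *state)
	{
		/* Pipe not running yet: get ready to accept the incoming
		 * signal, but do not enable the link to the next bridge. */
	}

	static void my_bridge_atomic_enable(struct drm_bridge *bridge,
					    struct drm_atomic_state *state)
	{
		/* Pipe is running: enable the display link feeding the next
		 * bridge in the chain, if there is one. */
	}

	static void my_bridge_atomic_disable(struct drm_bridge *bridge,
					     struct drm_atomic_state *state)
	{
		/* Pipe still running: stop consuming the incoming signal. */
	}

	static void my_bridge_atomic_post_disable(struct drm_bridge *bridge,
						  struct drm_atomic_state *state)
	{
		/* Pipe no longer running: finish the hardware shutdown. */
	}

	static const struct drm_bridge_funcs my_bridge_funcs = {
		.atomic_pre_enable = my_bridge_atomic_pre_enable,
		.atomic_enable = my_bridge_atomic_enable,
		.atomic_disable = my_bridge_atomic_disable,
		.atomic_post_disable = my_bridge_atomic_post_disable,
	};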


@ -49,6 +49,7 @@ struct lease_manager_operations {
int (*lm_change)(struct file_lease *, int, struct list_head *);
void (*lm_setup)(struct file_lease *, void **);
bool (*lm_breaker_owns_lease)(struct file_lease *);
int (*lm_open_conflict)(struct file *, int);
};
struct lock_manager {


@ -88,6 +88,9 @@ struct vm_fault;
/*
* Flags set by the core iomap code during operations:
*
* IOMAP_F_FOLIO_BATCH indicates that the folio batch mechanism is active
* for this operation, set by iomap_fill_dirty_folios().
*
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
* has changed as the result of this write operation.
*
@ -95,6 +98,7 @@ struct vm_fault;
* range it covers needs to be remapped by the high level before the operation
* can proceed.
*/
#define IOMAP_F_FOLIO_BATCH (1U << 13)
#define IOMAP_F_SIZE_CHANGED (1U << 14)
#define IOMAP_F_STALE (1U << 15)
@ -352,8 +356,8 @@ bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops);
loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
loff_t length);
unsigned int iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t *start,
loff_t end, unsigned int *iomap_flags);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops,
const struct iomap_write_ops *write_ops, void *private);


@ -23,7 +23,7 @@
#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
struct xattr_args {
__aligned_u64 __user value;
__aligned_u64 value;
__u32 size;
__u32 flags;
};
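Because this header is UAPI, userspace fills the struct directly, passing the value pointer through the __aligned_u64 field. A hedged sketch of a caller, assuming the setxattrat(2) calling convention; the syscall name, argument order, and availability of SYS_setxattrat are assumptions to check against your headers:

	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/xattr.h>

	/* Assumed signature: setxattrat(dfd, path, at_flags, name, args, size). */
	static int set_xattr_at(int dfd, const char *path, const char *name,
				const void *buf, size_t len)
	{
		struct xattr_args args = {
			.value = (uintptr_t)buf,	/* pointer carried as u64 */
			.size = (__u32)len,
			.flags = 0,			/* create or replace */
		};

		return syscall(SYS_setxattrat, dfd, path, 0, name, &args,
			       sizeof(args));
	}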


@ -902,8 +902,11 @@ out_clean:
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
acomp_request_free(data[thr].cr);
crypto_free_acomp(data[thr].cc);
if (data[thr].cr)
acomp_request_free(data[thr].cr);
if (!IS_ERR_OR_NULL(data[thr].cc))
crypto_free_acomp(data[thr].cc);
}
vfree(data);
}
@ -1499,8 +1502,11 @@ out_clean:
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
acomp_request_free(data[thr].cr);
crypto_free_acomp(data[thr].cc);
if (data[thr].cr)
acomp_request_free(data[thr].cr);
if (!IS_ERR_OR_NULL(data[thr].cc))
crypto_free_acomp(data[thr].cc);
}
vfree(data);
}