mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git synced 2026-01-11 17:10:13 +00:00

Compare commits

21 Commits

Linus Torvalds
6da43bbeb6 VFIO fixes for v6.18-rc6

Merge tag 'vfio-v6.18-rc6' of https://github.com/awilliam/linux-vfio

Pull VFIO selftest fixes from Alex Williamson:

 - Fix vfio selftests to remove the expectation that the IOMMU supports
   a 64-bit IOVA space.

   The failures manifest both in the original set of tests introduced
   this development cycle, which identity-map the IOVA to the buffer
   virtual address, and in the more recent boundary testing.

   Implement facilities for collecting the valid IOVA ranges from the
   backend, implement a simple IOVA allocator, and use the information
   for determining extents (Alex Mastro)

* tag 'vfio-v6.18-rc6' of https://github.com/awilliam/linux-vfio:
  vfio: selftests: replace iova=vaddr with allocated iovas
  vfio: selftests: add iova allocator
  vfio: selftests: fix map limit tests to use last available iova
  vfio: selftests: add iova range query helpers
2025-11-13 17:00:40 -08:00
Linus Torvalds
01814e11e5 hwmon fixes for v6.18-rc6

Merge tag 'hwmon-for-v6.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging

Pull hwmon fixes from Guenter Roeck:

 - gpd-fan: Fix compilation error for non-ACPI builds, and initialize EC
   when loading the driver

* tag 'hwmon-for-v6.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging:
  hwmon: (gpd-fan) initialize EC on driver load for Win 4
  hwmon: (gpd-fan) Fix compilation error in non-ACPI builds
2025-11-13 16:54:36 -08:00
Linus Torvalds
aecba2e013 Power management fixes for 6.18-rc6

Merge tag 'pm-6.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix issues related to the handling of compressed hibernation
  images and a recent intel_pstate driver regression:

   - Fix issues related to using inadequate data types and incorrect use
     of atomic variables in the compressed hibernation images handling
     code that were introduced during the 6.9 development cycle (Mario
     Limonciello)

   - Move a X86_FEATURE_IDA check from turbo_is_disabled() to the places
     where a new value for MSR_IA32_PERF_CTL is computed in intel_pstate
     to address a regression preventing users from enabling turbo
     frequencies post-boot (Srinivas Pandruvada)"

* tag 'pm-6.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: intel_pstate: Check IDA only before MSR_IA32_PERF_CTL writes
  PM: hibernate: Fix style issues in save_compressed_image()
  PM: hibernate: Use atomic64_t for compressed_size variable
  PM: hibernate: Emit an error when image writing fails
2025-11-13 16:31:07 -08:00
Linus Torvalds
6a3cc1b749 ACPI support fixes for 6.18-rc6

Merge tag 'acpi-6.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI fixes from Rafael Wysocki:
 "These fix issues in the ACPI CPPC library and in the recently added
  parser for the ACPI MRRM table:

   - Limit some checks in the ACPI CPPC library to online CPUs to avoid
     accessing uninitialized per-CPU variables when some CPUs are
     offline to start with, like during boot with 'nosmt=force' (Gautham
     Shenoy)

   - Rework add_boot_memory_ranges() in the ACPI MRRM table parser to
     fix memory leaks and improve error handling (Kaushlendra Kumar)"

* tag 'acpi-6.18-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI: MRRM: Fix memory leaks and improve error handling
  ACPI: CPPC: Limit perf ctrs in PCC check only to online CPUs
  ACPI: CPPC: Perform fast check switch only for online CPUs
  ACPI: CPPC: Check _CPC validity for only the online CPUs
  ACPI: CPPC: Detect preferred core availability on online CPUs
2025-11-13 16:22:36 -08:00
Rafael J. Wysocki
161284b26f Merge branch 'pm-sleep'
Merge fixes for issues related to the handling of compressed hibernation
images that were introduced during the 6.9 development cycle.

* pm-sleep:
  PM: hibernate: Fix style issues in save_compressed_image()
  PM: hibernate: Use atomic64_t for compressed_size variable
  PM: hibernate: Emit an error when image writing fails
2025-11-13 21:05:46 +01:00
Rafael J. Wysocki
7564f3543c Merge branches 'acpi-cppc' and 'acpi-tables'
Merge ACPI CPPC library fixes and an ACPI MRRM table parser fix for
6.18-rc6.

* acpi-cppc:
  ACPI: CPPC: Limit perf ctrs in PCC check only to online CPUs
  ACPI: CPPC: Perform fast check switch only for online CPUs
  ACPI: CPPC: Check _CPC validity for only the online CPUs
  ACPI: CPPC: Detect preferred core availability on online CPUs

* acpi-tables:
  ACPI: MRRM: Fix memory leaks and improve error handling
2025-11-13 20:40:51 +01:00
Cryolitia PukNgae
c55a8e24cd hwmon: (gpd-fan) initialize EC on driver load for Win 4
The original implementation re-initializes the EC whenever it reports a
zero value, as a workaround for the black-box, buggy firmware.

A contributor has now tested and reported that the actual bug is that
the firmware does not initialize the EC on boot, so the EC remains in
an unusable state; there is no need to re-initialize it at runtime. The
original workaround is also imperfect: any write command is ignored
until the EC is first read. Simply re-initializing the EC
unconditionally at driver load is sufficient.
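
The re-initialization itself is a short EC register handshake, shown in
gpd_win4_init_ec() in the diff below. Annotated as a sketch (register
meanings are inferred from the code, not from a datasheet):

  static void gpd_win4_init_ec(void)
  {
          u8 chip_id, chip_ver;

          /* Offset 0x2000 identifies the EC; 0x55 is the expected chip */
          gpd_ecram_read(0x2000, &chip_id);
          if (chip_id == 0x55) {
                  /* Setting bit 7 of the byte at 0x1060 appears to bring
                   * the EC out of its uninitialized state.
                   */
                  gpd_ecram_read(0x1060, &chip_ver);
                  gpd_ecram_write(0x1060, chip_ver | 0x80);
          }
  }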

Fixes: 0ab88e239439 ("hwmon: add GPD devices sensor driver")
Co-developed-by: kylon <3252255+kylon@users.noreply.github.com>
Signed-off-by: kylon <3252255+kylon@users.noreply.github.com>
Link: https://github.com/Cryolitia/gpd-fan-driver/pull/20
Signed-off-by: Cryolitia PukNgae <cryolitia@uniontech.com>
Link: https://lore.kernel.org/r/20251030-win4-v1-1-c374dcb86985@uniontech.com
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
2025-11-12 11:54:37 -08:00
Gopi Krishna Menon
9efb297c52 hwmon: (gpd-fan) Fix compilation error in non-ACPI builds
Building gpd-fan driver without CONFIG_ACPI results in the following
build errors:

drivers/hwmon/gpd-fan.c: In function ‘gpd_ecram_read’:
drivers/hwmon/gpd-fan.c:228:9: error: implicit declaration of function ‘outb’ [-Werror=implicit-function-declaration]
  228 |         outb(0x2E, addr_port);
      |         ^~~~
drivers/hwmon/gpd-fan.c:241:16: error: implicit declaration of function ‘inb’ [-Werror=implicit-function-declaration]
  241 |         *val = inb(data_port);

The definitions for inb() and outb() come from <linux/io.h>
(specifically through <asm/io.h>), which is implicitly included via
<acpi_io.h>. When CONFIG_ACPI is not set, <acpi_io.h> is not included,
so <linux/io.h> ends up omitted as well.

Since the driver does not depend on ACPI, remove <linux/acpi.h> and add
<linux/io.h> directly to fix the compilation errors.

Signed-off-by: Gopi Krishna Menon <krishnagopi487@gmail.com>
Link: https://lore.kernel.org/r/20251024202042.752160-1-krishnagopi487@gmail.com
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
2025-11-12 11:54:37 -08:00
Srinivas Pandruvada
4b747cc628 cpufreq: intel_pstate: Check IDA only before MSR_IA32_PERF_CTL writes
Commit ac4e04d9e378 ("cpufreq: intel_pstate: Unchecked MSR aceess in
legacy mode") introduced a check for the X86_FEATURE_IDA feature to
verify turbo mode support. Although this is the correct way to check
for turbo mode support, it causes issues on some platforms that disable
turbo during OS boot but enable it later [1]. Before this feature check
was added, users were able to get turbo mode frequencies by writing 0
to /sys/devices/system/cpu/intel_pstate/no_turbo post-boot.

To restore the old behavior on the affected systems while still
addressing the unchecked MSR issue on some Skylake-X systems, check
X86_FEATURE_IDA only immediately before updates of MSR_IA32_PERF_CTL
that may involve setting the Turbo Engage Bit (bit 32).
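
Concretely, the guarded value computation takes the shape below (a
condensed sketch of the core_get_val()/atom_get_val() hunks in the
intel_pstate diff further down this page; see there for full context):

  val = (u64)pstate << 8;
  /* Engage turbo (bit 32) only when the CPU advertises IDA */
  if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
      cpu_feature_enabled(X86_FEATURE_IDA))
          val |= (u64)1 << 32;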

Fixes: ac4e04d9e378 ("cpufreq: intel_pstate: Unchecked MSR aceess in legacy mode")
Reported-by: Aaron Rainbolt <arainbolt@kfocus.org>
Closes: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/2122531 [1]
Tested-by: Aaron Rainbolt <arainbolt@kfocus.org>
Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
[ rjw: Subject adjustment, changelog edits ]
Link: https://patch.msgid.link/20251111010840.141490-1-srinivas.pandruvada@linux.intel.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-12 17:59:37 +01:00
Alex Mastro
d323ad7396 vfio: selftests: replace iova=vaddr with allocated iovas
vfio_dma_mapping_test and vfio_pci_driver_test currently use iova=vaddr
as part of DMA mapping operations. However, not all IOMMUs support the
same virtual address width as the processor. For instance, older Intel
consumer platforms only support 39-bits of IOMMU address space. On such
platforms, using the virtual address as the IOVA fails.

Make the tests more robust by using iova_allocator to vend IOVAs, which
queries legally accessible IOVAs from the underlying IOMMUFD or VFIO
container.

Reviewed-by: David Matlack <dmatlack@google.com>
Tested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251111-iova-ranges-v3-4-7960244642c5@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
2025-11-12 08:04:42 -07:00
Alex Mastro
ce0e3c403e vfio: selftests: add iova allocator
Add struct iova_allocator, which gives tests a convenient way to generate
legally-accessible IOVAs to map. This allocator traverses the sorted
available IOVA ranges linearly, requires power-of-two size allocations,
and does not support freeing iova allocations. The assumption is that
tests are not IOVA space-bounded, and will not need to recycle IOVAs.

This is based on Alex Williamson's patch series for adding an IOVA
allocator [1].

[1] https://lore.kernel.org/all/20251108212954.26477-1-alex@shazbot.org/
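
A minimal usage sketch, based on the helpers this series adds (the
device is assumed to come from vfio_pci_device_init(), the buffer from
mmap() as in the existing tests, and vfio_pci_dma_unmap() is assumed to
mirror vfio_pci_dma_map()):

  struct iova_allocator *allocator = iova_allocator_init(device);
  struct vfio_dma_region region = {
          .vaddr = vaddr,
          /* power-of-two size; returns a legal, size-aligned IOVA */
          .iova = iova_allocator_alloc(allocator, SZ_2M),
          .size = SZ_2M,
  };

  vfio_pci_dma_map(device, &region);
  /* ... exercise DMA ... */
  vfio_pci_dma_unmap(device, &region);
  iova_allocator_cleanup(allocator);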

Reviewed-by: David Matlack <dmatlack@google.com>
Tested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251111-iova-ranges-v3-3-7960244642c5@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
2025-11-12 08:04:42 -07:00
Alex Mastro
a77fa0b922 vfio: selftests: fix map limit tests to use last available iova
Use the newly available vfio_pci_iova_ranges() to determine the last
legal IOVA, and use this as the basis for vfio_dma_map_limit_test tests.

Fixes: de8d1f2fd5a5 ("vfio: selftests: add end of address space DMA map/unmap tests")
Reviewed-by: David Matlack <dmatlack@google.com>
Tested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251111-iova-ranges-v3-2-7960244642c5@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
2025-11-12 08:04:42 -07:00
Alex Mastro
7c44656ab3 vfio: selftests: add iova range query helpers
VFIO selftests need to map IOVAs from legally accessible ranges, which
can vary between hardware. Tests in vfio_dma_mapping_test.c make
excessively strong assumptions about which IOVAs can be mapped.

Add vfio_iommu_iova_ranges(), which queries IOVA ranges from the
IOMMUFD or VFIO container associated with the device. The queried ranges
are normalized to IOMMUFD's iommu_iova_range representation so that
handling of IOVA ranges up the stack can be implementation-agnostic.
iommu_iova_range and vfio_iova_range are equivalent, so prefer the new
interface's struct.

Query IOMMUFD's ranges with IOMMU_IOAS_IOVA_RANGES.
Query VFIO container's ranges with VFIO_IOMMU_GET_INFO and
VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE.

The underlying vfio_iommu_type1_info buffer-related functionality has
been kept generic so the same helpers can be used to query other
capability chain information, if needed.
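
For the IOMMUFD path, the query follows the usual two-call sizing
pattern, condensed here from iommufd_iova_ranges() in the diff below
(the first ioctl() is expected to fail with EMSGSIZE while filling in
num_iovas; the second call retrieves the ranges):

  struct iommu_ioas_iova_ranges query = {
          .size = sizeof(query),
          .ioas_id = device->ioas_id,
  };
  struct iommu_iova_range *ranges;
  int ret;

  /* Sizing call: fails with EMSGSIZE, sets query.num_iovas */
  ret = ioctl(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
  VFIO_ASSERT_EQ(ret, -1);
  VFIO_ASSERT_EQ(errno, EMSGSIZE);

  ranges = calloc(query.num_iovas, sizeof(*ranges));
  query.allowed_iovas = (uintptr_t)ranges;
  ioctl_assert(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);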

Reviewed-by: David Matlack <dmatlack@google.com>
Tested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251111-iova-ranges-v3-1-7960244642c5@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
2025-11-12 08:04:42 -07:00
Kaushlendra Kumar
4b93d211bb ACPI: MRRM: Fix memory leaks and improve error handling
Add proper error handling and resource cleanup to prevent memory leaks
in add_boot_memory_ranges(). The function now checks for a NULL return
from kobject_create_and_add(), uses a local buffer for range names to
avoid dynamic allocation, and implements a cleanup path that removes
previously created sysfs groups and kobjects on failure.

This prevents resource leaks when kobject creation or sysfs group
creation fails during boot memory range initialization.

Signed-off-by: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Link: https://patch.msgid.link/20251030023228.3956296-1-kaushlendra.kumar@intel.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 21:48:49 +01:00
Gautham R. Shenoy
0fce758706 ACPI: CPPC: Limit perf ctrs in PCC check only to online CPUs
The per_cpu(cpc_desc_ptr, cpu) object is initialized only for the online
CPUs via acpi_soft_cpu_online() --> __acpi_processor_start() -->
acpi_cppc_processor_probe().

However the function cppc_perf_ctrs_in_pcc() checks if the CPPC
perf-ctrs are in a PCC region for all the present CPUs, which breaks
when the kernel is booted with "nosmt=force".

Hence, limit the check only to the online CPUs.

Fixes: ae2df912d1a5 ("ACPI: CPPC: Disable FIE if registers in PCC regions")
Reviewed-by: "Mario Limonciello (AMD) (kernel.org)" <superm1@kernel.org>
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://patch.msgid.link/20251107074145.2340-5-gautham.shenoy@amd.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 18:37:42 +01:00
Gautham R. Shenoy
8821c8e80a ACPI: CPPC: Perform fast check switch only for online CPUs
The per_cpu(cpc_desc_ptr, cpu) object is initialized only for the online
CPUs via acpi_soft_cpu_online() --> __acpi_processor_start() -->
acpi_cppc_processor_probe().

However the function cppc_allow_fast_switch() checks for the validity
of the _CPC object for all the present CPUs. This breaks when the
kernel is booted with "nosmt=force".

Hence, check the fast_switch capability only for the online CPUs.

Fixes: 15eece6c5b05 ("ACPI: CPPC: Fix NULL pointer dereference when nosmp is used")
Reviewed-by: "Mario Limonciello (AMD) (kernel.org)" <superm1@kernel.org>
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://patch.msgid.link/20251107074145.2340-4-gautham.shenoy@amd.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 18:37:42 +01:00
Gautham R. Shenoy
6dd3b8a709 ACPI: CPPC: Check _CPC validity for only the online CPUs
The per_cpu(cpc_desc_ptr, cpu) object is initialized only for the online
CPUs via acpi_soft_cpu_online() --> __acpi_processor_start() -->
acpi_cppc_processor_probe().

However the function acpi_cpc_valid() checks for the validity of the
_CPC object for all the present CPUs. This breaks when the kernel is
booted with "nosmt=force".

Hence check the validity of the _CPC objects of only the online CPUs.

Fixes: 2aeca6bd0277 ("ACPI: CPPC: Check present CPUs for determining _CPC is valid")
Reported-by: Christopher Harris <chris.harris79@gmail.com>
Closes: https://lore.kernel.org/lkml/CAM+eXpdDT7KjLV0AxEwOLkSJ2QtrsvGvjA2cCHvt1d0k2_C4Cw@mail.gmail.com/
Suggested-by: Mario Limonciello <mario.limonciello@amd.com>
Reviewed-by: "Mario Limonciello (AMD) (kernel.org)" <superm1@kernel.org>
Tested-by: Christopher Harris <chris.harris79@gmail.com>
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://patch.msgid.link/20251107074145.2340-3-gautham.shenoy@amd.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 18:37:42 +01:00
Gautham R. Shenoy
4fe5934db4 ACPI: CPPC: Detect preferred core availability on online CPUs
Commit 279f838a61f9 ("x86/amd: Detect preferred cores in
amd_get_boost_ratio_numerator()") introduced the ability to detect the
preferred core on AMD platforms by checking whether there are at least
two distinct highest_perf values.

However, it uses for_each_present_cpu() to iterate through all the
CPUs in the platform, which is problematic when the kernel is booted
with "nosmt=force" commandline option.

Hence limit the search to only the online CPUs.

Fixes: 279f838a61f9 ("x86/amd: Detect preferred cores in amd_get_boost_ratio_numerator()")
Reported-by: Christopher Harris <chris.harris79@gmail.com>
Closes: https://lore.kernel.org/lkml/CAM+eXpdDT7KjLV0AxEwOLkSJ2QtrsvGvjA2cCHvt1d0k2_C4Cw@mail.gmail.com/
Reviewed-by: "Mario Limonciello (AMD) (kernel.org)" <superm1@kernel.org>
Tested-by: Christopher Harris <chris.harris79@gmail.com>
Signed-off-by: Gautham R. Shenoy <gautham.shenoy@amd.com>
Link: https://patch.msgid.link/20251107074145.2340-2-gautham.shenoy@amd.com
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 18:37:42 +01:00
Mario Limonciello (AMD)
0b6c10cb84 PM: hibernate: Fix style issues in save_compressed_image()
Address two issues indicated by checkpatch:

 - Trailing statements should be on next line.
 - Prefer 'unsigned int' to bare use of 'unsigned'.

Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
[ rjw: Changelog edits ]
Link: https://patch.msgid.link/20251106045158.3198061-4-superm1@kernel.org
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 16:53:57 +01:00
Mario Limonciello (AMD)
66ededc694 PM: hibernate: Use atomic64_t for compressed_size variable
`compressed_size` accumulates the compressed image size in bytes in a
32-bit atomic_t, so it can overflow for images larger than 2 GiB and
show nonsensical values.

Change it from `atomic_t` to `atomic64_t` to prevent the overflow.
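
As a side effect, visible in the diff below, the accumulation in
compress_threadfn() changes from a non-atomic read-modify-write to a
single atomic64_add(), which also closes a potential lost-update window
between compressor threads (an observation from the hunk itself; the
changelog only mentions the overflow):

  /* before: racy read-modify-write on a 32-bit counter */
  atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);

  /* after: atomic 64-bit accumulation */
  atomic64_add(d->cmp_len, &compressed_size);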

Fixes: a06c6f5d3cc9 ("PM: hibernate: Move to crypto APIs for LZO compression")
Reported-by: Askar Safin <safinaskar@gmail.com>
Closes: https://lore.kernel.org/linux-pm/20251105180506.137448-1-safinaskar@gmail.com/
Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
Tested-by: Askar Safin <safinaskar@gmail.com>
Cc: 6.9+ <stable@vger.kernel.org> # 6.9+
Link: https://patch.msgid.link/20251106045158.3198061-3-superm1@kernel.org
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 16:53:56 +01:00
Mario Limonciello (AMD)
62b9ca1706 PM: hibernate: Emit an error when image writing fails
If image writing fails, a return code is passed up to the caller, but
none of the callers log anything, so the only record of the failure is
the return code that userspace gets.

Adjust the logging so that the image size and write speed are only
emitted on success, and an error message is logged on failure.

Fixes: a06c6f5d3cc9 ("PM: hibernate: Move to crypto APIs for LZO compression")
Reported-by: Askar Safin <safinaskar@gmail.com>
Closes: https://lore.kernel.org/linux-pm/20251105180506.137448-1-safinaskar@gmail.com/
Signed-off-by: Mario Limonciello (AMD) <superm1@kernel.org>
Tested-by: Askar Safin <safinaskar@gmail.com>
Cc: 6.9+ <stable@vger.kernel.org> # 6.9+
[ rjw: Added missing braces after "else", changelog edits ]
Link: https://patch.msgid.link/20251106045158.3198061-2-superm1@kernel.org
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2025-11-07 16:53:56 +01:00
10 changed files with 372 additions and 69 deletions

View File

@@ -196,7 +196,7 @@ int amd_detect_prefcore(bool *detected)
 		break;
 	}
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		u32 tmp;
 		int ret;
 
View File

@@ -152,26 +152,49 @@ ATTRIBUTE_GROUPS(memory_range);
 static __init int add_boot_memory_ranges(void)
 {
-	struct kobject *pkobj, *kobj;
+	struct kobject *pkobj, *kobj, **kobjs;
 	int ret = -EINVAL;
-	char *name;
+	char name[16];
+	int i;
 
 	pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+	if (!pkobj)
+		return -ENOMEM;
 
-	for (int i = 0; i < mrrm_mem_entry_num; i++) {
-		name = kasprintf(GFP_KERNEL, "range%d", i);
-		if (!name) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		kobj = kobject_create_and_add(name, pkobj);
-
-		ret = sysfs_create_groups(kobj, memory_range_groups);
-		if (ret)
-			return ret;
-	}
+	kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL);
+	if (!kobjs) {
+		kobject_put(pkobj);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < mrrm_mem_entry_num; i++) {
+		scnprintf(name, sizeof(name), "range%d", i);
+		kobj = kobject_create_and_add(name, pkobj);
+		if (!kobj) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		ret = sysfs_create_groups(kobj, memory_range_groups);
+		if (ret) {
+			kobject_put(kobj);
+			goto cleanup;
+		}
+
+		kobjs[i] = kobj;
+	}
+
+	kfree(kobjs);
+	return 0;
+
+cleanup:
+	for (int j = 0; j < i; j++) {
+		if (kobjs[j]) {
+			sysfs_remove_groups(kobjs[j], memory_range_groups);
+			kobject_put(kobjs[j]);
+		}
+	}
+	kfree(kobjs);
+	kobject_put(pkobj);
 
 	return ret;
 }

View File

@@ -460,7 +460,7 @@ bool acpi_cpc_valid(void)
 	if (acpi_disabled)
 		return false;
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 		if (!cpc_ptr)
 			return false;
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
 	struct cpc_desc *cpc_ptr;
 	int cpu;
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
 		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -1435,7 +1435,7 @@ bool cppc_perf_ctrs_in_pcc(void)
 {
 	int cpu;
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		struct cpc_register_resource *ref_perf_reg;
 		struct cpc_desc *cpc_desc;
 
View File

@@ -603,9 +603,6 @@ static bool turbo_is_disabled(void)
 {
 	u64 misc_en;
 
-	if (!cpu_feature_enabled(X86_FEATURE_IDA))
-		return true;
-
 	rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
 
 	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -2106,7 +2103,8 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+	    cpu_feature_enabled(X86_FEATURE_IDA))
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -2271,7 +2269,8 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+	    cpu_feature_enabled(X86_FEATURE_IDA))
 		val |= (u64)1 << 32;
 
 	return val;

View File

@@ -12,9 +12,9 @@
  * Copyright (c) 2024 Cryolitia PukNgae
  */
 
-#include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/hwmon.h>
+#include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -276,31 +276,6 @@ static int gpd_generic_read_rpm(void)
 	return (u16)high << 8 | low;
 }
 
-static void gpd_win4_init_ec(void)
-{
-	u8 chip_id, chip_ver;
-
-	gpd_ecram_read(0x2000, &chip_id);
-	if (chip_id == 0x55) {
-		gpd_ecram_read(0x1060, &chip_ver);
-		gpd_ecram_write(0x1060, chip_ver | 0x80);
-	}
-}
-
-static int gpd_win4_read_rpm(void)
-{
-	int ret;
-
-	ret = gpd_generic_read_rpm();
-
-	if (ret == 0)
-		// Re-init EC when speed is 0
-		gpd_win4_init_ec();
-
-	return ret;
-}
-
 static int gpd_wm2_read_rpm(void)
 {
 	for (u16 pwm_ctr_offset = GPD_PWM_CTR_OFFSET;
@@ -320,11 +295,10 @@ static int gpd_wm2_read_rpm(void)
 static int gpd_read_rpm(void)
 {
 	switch (gpd_driver_priv.drvdata->board) {
+	case win4_6800u:
 	case win_mini:
 	case duo:
 		return gpd_generic_read_rpm();
-	case win4_6800u:
-		return gpd_win4_read_rpm();
 	case win_max_2:
 		return gpd_wm2_read_rpm();
 	}
@@ -607,6 +581,28 @@ static struct hwmon_chip_info gpd_fan_chip_info = {
 	.info = gpd_fan_hwmon_channel_info
 };
 
+static void gpd_win4_init_ec(void)
+{
+	u8 chip_id, chip_ver;
+
+	gpd_ecram_read(0x2000, &chip_id);
+	if (chip_id == 0x55) {
+		gpd_ecram_read(0x1060, &chip_ver);
+		gpd_ecram_write(0x1060, chip_ver | 0x80);
+	}
+}
+
+static void gpd_init_ec(void)
+{
+	// The buggy firmware won't initialize EC properly on boot.
+	// Before its initialization, reading RPM will always return 0,
+	// and writing PWM will have no effect.
+	// Initialize it manually on driver load.
+	if (gpd_driver_priv.drvdata->board == win4_6800u)
+		gpd_win4_init_ec();
+}
+
 static int gpd_fan_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -634,6 +630,8 @@ static int gpd_fan_probe(struct platform_device *pdev)
 		return dev_err_probe(dev, PTR_ERR(hwdev),
 				     "Failed to register hwmon device\n");
 
+	gpd_init_ec();
+
 	return 0;
 }

View File

@@ -635,7 +635,7 @@ struct cmp_data {
 };
 
 /* Indicates the image size after compression */
-static atomic_t compressed_size = ATOMIC_INIT(0);
+static atomic64_t compressed_size = ATOMIC_INIT(0);
 
 /*
  * Compression function that runs in its own thread.
@@ -664,7 +664,7 @@ static int compress_threadfn(void *data)
 			d->ret = crypto_acomp_compress(d->cr);
 			d->cmp_len = d->cr->dlen;
 
-			atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);
+			atomic64_add(d->cmp_len, &compressed_size);
 			atomic_set_release(&d->stop, 1);
 			wake_up(&d->done);
 		}
@@ -689,14 +689,14 @@ static int save_compressed_image(struct swap_map_handle *handle,
 	ktime_t start;
 	ktime_t stop;
 	size_t off;
-	unsigned thr, run_threads, nr_threads;
+	unsigned int thr, run_threads, nr_threads;
 	unsigned char *page = NULL;
 	struct cmp_data *data = NULL;
 	struct crc_data *crc = NULL;
 
 	hib_init_batch(&hb);
 
-	atomic_set(&compressed_size, 0);
+	atomic64_set(&compressed_size, 0);
 
 	/*
 	 * We'll limit the number of threads for compression to limit memory
@@ -877,11 +877,14 @@ out_finish:
 	stop = ktime_get();
 	if (!ret)
 		ret = err2;
-	if (!ret)
+	if (!ret) {
+		swsusp_show_speed(start, stop, nr_to_write, "Wrote");
+		pr_info("Image size after compression: %lld kbytes\n",
+			(atomic64_read(&compressed_size) / 1024));
 		pr_info("Image saving done\n");
-	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
-	pr_info("Image size after compression: %d kbytes\n",
-		(atomic_read(&compressed_size) / 1024));
+	} else {
+		pr_err("Image saving failed: %d\n", ret);
+	}
 
 out_clean:
 	hib_finish_batch(&hb);
@@ -899,7 +902,8 @@ out_clean:
 		}
 		vfree(data);
 	}
-	if (page) free_page((unsigned long)page);
+	if (page)
+		free_page((unsigned long)page);
 
 	return ret;
 }

View File

@@ -4,9 +4,12 @@
 #include <fcntl.h>
 #include <string.h>
 
-#include <linux/vfio.h>
+#include <uapi/linux/types.h>
+
+#include <linux/iommufd.h>
 #include <linux/list.h>
 #include <linux/pci_regs.h>
+#include <linux/vfio.h>
 
 #include "../../../kselftest.h"
 
@@ -185,6 +188,13 @@ struct vfio_pci_device {
 	struct vfio_pci_driver driver;
 };
 
+struct iova_allocator {
+	struct iommu_iova_range *ranges;
+	u32 nranges;
+	u32 range_idx;
+	u64 range_offset;
+};
+
 /*
  * Return the BDF string of the device that the test should use.
 *
@@ -206,6 +216,13 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_
 void vfio_pci_device_cleanup(struct vfio_pci_device *device);
 void vfio_pci_device_reset(struct vfio_pci_device *device);
 
+struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
+					      u32 *nranges);
+
+struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device);
+void iova_allocator_cleanup(struct iova_allocator *allocator);
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
+
 int __vfio_pci_dma_map(struct vfio_pci_device *device,
 		       struct vfio_dma_region *region);
 int __vfio_pci_dma_unmap(struct vfio_pci_device *device,

View File

@@ -12,11 +12,12 @@
 #include <sys/mman.h>
 
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
 #include <linux/limits.h>
 #include <linux/mman.h>
 #include <linux/overflow.h>
 #include <linux/types.h>
 #include <linux/vfio.h>
-#include <linux/iommufd.h>
 
 #include "../../../kselftest.h"
 #include <vfio_util.h>
@@ -29,6 +30,249 @@
 		VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
 	} while (0)
 
+static struct vfio_info_cap_header *next_cap_hdr(void *buf, u32 bufsz,
+						 u32 *cap_offset)
+{
+	struct vfio_info_cap_header *hdr;
+
+	if (!*cap_offset)
+		return NULL;
+
+	VFIO_ASSERT_LT(*cap_offset, bufsz);
+	VFIO_ASSERT_GE(bufsz - *cap_offset, sizeof(*hdr));
+
+	hdr = (struct vfio_info_cap_header *)((u8 *)buf + *cap_offset);
+	*cap_offset = hdr->next;
+
+	return hdr;
+}
+
+static struct vfio_info_cap_header *vfio_iommu_info_cap_hdr(struct vfio_iommu_type1_info *info,
+							    u16 cap_id)
+{
+	struct vfio_info_cap_header *hdr;
+	u32 cap_offset = info->cap_offset;
+	u32 max_depth;
+	u32 depth = 0;
+
+	if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
+		return NULL;
+
+	if (cap_offset)
+		VFIO_ASSERT_GE(cap_offset, sizeof(*info));
+
+	max_depth = (info->argsz - sizeof(*info)) / sizeof(*hdr);
+
+	while ((hdr = next_cap_hdr(info, info->argsz, &cap_offset))) {
+		depth++;
+		VFIO_ASSERT_LE(depth, max_depth, "Capability chain contains a cycle\n");
+
+		if (hdr->id == cap_id)
+			return hdr;
+	}
+
+	return NULL;
+}
+
+/* Return buffer including capability chain, if present. Free with free() */
+static struct vfio_iommu_type1_info *vfio_iommu_get_info(struct vfio_pci_device *device)
+{
+	struct vfio_iommu_type1_info *info;
+
+	info = malloc(sizeof(*info));
+	VFIO_ASSERT_NOT_NULL(info);
+
+	*info = (struct vfio_iommu_type1_info) {
+		.argsz = sizeof(*info),
+	};
+
+	ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
+	VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
+	info = realloc(info, info->argsz);
+	VFIO_ASSERT_NOT_NULL(info);
+
+	ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
+	VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
+	return info;
+}
+
+/*
+ * Return iova ranges for the device's container. Normalize vfio_iommu_type1 to
+ * report iommufd's iommu_iova_range. Free with free().
+ */
+static struct iommu_iova_range *vfio_iommu_iova_ranges(struct vfio_pci_device *device,
+						       u32 *nranges)
+{
+	struct vfio_iommu_type1_info_cap_iova_range *cap_range;
+	struct vfio_iommu_type1_info *info;
+	struct vfio_info_cap_header *hdr;
+	struct iommu_iova_range *ranges = NULL;
+
+	info = vfio_iommu_get_info(device);
+	hdr = vfio_iommu_info_cap_hdr(info, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
+	VFIO_ASSERT_NOT_NULL(hdr);
+
+	cap_range = container_of(hdr, struct vfio_iommu_type1_info_cap_iova_range, header);
+	VFIO_ASSERT_GT(cap_range->nr_iovas, 0);
+
+	ranges = calloc(cap_range->nr_iovas, sizeof(*ranges));
+	VFIO_ASSERT_NOT_NULL(ranges);
+
+	for (u32 i = 0; i < cap_range->nr_iovas; i++) {
+		ranges[i] = (struct iommu_iova_range){
+			.start = cap_range->iova_ranges[i].start,
+			.last = cap_range->iova_ranges[i].end,
+		};
+	}
+
+	*nranges = cap_range->nr_iovas;
+
+	free(info);
+	return ranges;
+}
+
+/* Return iova ranges of the device's IOAS. Free with free() */
+static struct iommu_iova_range *iommufd_iova_ranges(struct vfio_pci_device *device,
+						    u32 *nranges)
+{
+	struct iommu_iova_range *ranges;
+	int ret;
+
+	struct iommu_ioas_iova_ranges query = {
+		.size = sizeof(query),
+		.ioas_id = device->ioas_id,
+	};
+
+	ret = ioctl(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+	VFIO_ASSERT_EQ(ret, -1);
+	VFIO_ASSERT_EQ(errno, EMSGSIZE);
+	VFIO_ASSERT_GT(query.num_iovas, 0);
+
+	ranges = calloc(query.num_iovas, sizeof(*ranges));
+	VFIO_ASSERT_NOT_NULL(ranges);
+
+	query.allowed_iovas = (uintptr_t)ranges;
+	ioctl_assert(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+
+	*nranges = query.num_iovas;
+	return ranges;
+}
+
+static int iova_range_comp(const void *a, const void *b)
+{
+	const struct iommu_iova_range *ra = a, *rb = b;
+
+	if (ra->start < rb->start)
+		return -1;
+
+	if (ra->start > rb->start)
+		return 1;
+
+	return 0;
+}
+
+/* Return sorted IOVA ranges of the device. Free with free(). */
+struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
+					      u32 *nranges)
+{
+	struct iommu_iova_range *ranges;
+
+	if (device->iommufd)
+		ranges = iommufd_iova_ranges(device, nranges);
+	else
+		ranges = vfio_iommu_iova_ranges(device, nranges);
+
+	if (!ranges)
+		return NULL;
+
+	VFIO_ASSERT_GT(*nranges, 0);
+
+	/* Sort and check that ranges are sane and non-overlapping */
+	qsort(ranges, *nranges, sizeof(*ranges), iova_range_comp);
+	VFIO_ASSERT_LT(ranges[0].start, ranges[0].last);
+
+	for (u32 i = 1; i < *nranges; i++) {
+		VFIO_ASSERT_LT(ranges[i].start, ranges[i].last);
+		VFIO_ASSERT_LT(ranges[i - 1].last, ranges[i].start);
+	}
+
+	return ranges;
+}
+
+struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device)
+{
+	struct iova_allocator *allocator;
+	struct iommu_iova_range *ranges;
+	u32 nranges;
+
+	ranges = vfio_pci_iova_ranges(device, &nranges);
+	VFIO_ASSERT_NOT_NULL(ranges);
+
+	allocator = malloc(sizeof(*allocator));
+	VFIO_ASSERT_NOT_NULL(allocator);
+
+	*allocator = (struct iova_allocator){
+		.ranges = ranges,
+		.nranges = nranges,
+		.range_idx = 0,
+		.range_offset = 0,
+	};
+
+	return allocator;
+}
+
+void iova_allocator_cleanup(struct iova_allocator *allocator)
+{
+	free(allocator->ranges);
+	free(allocator);
+}
+
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size)
+{
+	VFIO_ASSERT_GT(size, 0, "Invalid size arg, zero\n");
+	VFIO_ASSERT_EQ(size & (size - 1), 0, "Invalid size arg, non-power-of-2\n");
+
+	for (;;) {
+		struct iommu_iova_range *range;
+		iova_t iova, last;
+
+		VFIO_ASSERT_LT(allocator->range_idx, allocator->nranges,
+			       "IOVA allocator out of space\n");
+
+		range = &allocator->ranges[allocator->range_idx];
+		iova = range->start + allocator->range_offset;
+
+		/* Check for sufficient space at the current offset */
+		if (check_add_overflow(iova, size - 1, &last) ||
+		    last > range->last)
+			goto next_range;
+
+		/* Align iova to size */
+		iova = last & ~(size - 1);
+
+		/* Check for sufficient space at the aligned iova */
+		if (check_add_overflow(iova, size - 1, &last) ||
+		    last > range->last)
+			goto next_range;
+
+		if (last == range->last) {
+			allocator->range_idx++;
+			allocator->range_offset = 0;
+		} else {
+			allocator->range_offset = last - range->start + 1;
+		}
+
+		return iova;
+
+next_range:
+		allocator->range_idx++;
+		allocator->range_offset = 0;
+	}
+}
+
 iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
 {
 	struct vfio_dma_region *region;
View File

@@ -3,6 +3,8 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
 #include <linux/limits.h>
 #include <linux/mman.h>
 #include <linux/sizes.h>
@@ -93,6 +95,7 @@ static int iommu_mapping_get(const char *bdf, u64 iova,
 
 FIXTURE(vfio_dma_mapping_test) {
 	struct vfio_pci_device *device;
+	struct iova_allocator *iova_allocator;
 };
 
 FIXTURE_VARIANT(vfio_dma_mapping_test) {
@@ -117,10 +120,12 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB |
 FIXTURE_SETUP(vfio_dma_mapping_test)
 {
 	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	self->iova_allocator = iova_allocator_init(self->device);
 }
 
 FIXTURE_TEARDOWN(vfio_dma_mapping_test)
 {
+	iova_allocator_cleanup(self->iova_allocator);
 	vfio_pci_device_cleanup(self->device);
 }
@@ -142,7 +147,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
 	else
 		ASSERT_NE(region.vaddr, MAP_FAILED);
 
-	region.iova = (u64)region.vaddr;
+	region.iova = iova_allocator_alloc(self->iova_allocator, size);
 	region.size = size;
 
 	vfio_pci_dma_map(self->device, &region);
@@ -219,7 +224,10 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
 FIXTURE_SETUP(vfio_dma_map_limit_test)
 {
 	struct vfio_dma_region *region = &self->region;
+	struct iommu_iova_range *ranges;
 	u64 region_size = getpagesize();
+	iova_t last_iova;
+	u32 nranges;
 
 	/*
 	 * Over-allocate mmap by double the size to provide enough backing vaddr
@@ -232,8 +240,13 @@ FIXTURE_SETUP(vfio_dma_map_limit_test)
 			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 	ASSERT_NE(region->vaddr, MAP_FAILED);
 
-	/* One page prior to the end of address space */
-	region->iova = ~(iova_t)0 & ~(region_size - 1);
+	ranges = vfio_pci_iova_ranges(self->device, &nranges);
+	VFIO_ASSERT_NOT_NULL(ranges);
+	last_iova = ranges[nranges - 1].last;
+	free(ranges);
+
+	/* One page prior to the last iova */
+	region->iova = last_iova & ~(region_size - 1);
 	region->size = region_size;
 }
@@ -276,6 +289,7 @@ TEST_F(vfio_dma_map_limit_test, overflow)
 	struct vfio_dma_region *region = &self->region;
 	int rc;
 
+	region->iova = ~(iova_t)0 & ~(region->size - 1);
 	region->size = self->mmap_size;
 
 	rc = __vfio_pci_dma_map(self->device, region);

View File

@@ -19,6 +19,7 @@ static const char *device_bdf;
 	} while (0)
 
 static void region_setup(struct vfio_pci_device *device,
+			 struct iova_allocator *iova_allocator,
 			 struct vfio_dma_region *region, u64 size)
 {
 	const int flags = MAP_SHARED | MAP_ANONYMOUS;
@@ -29,7 +30,7 @@ static void region_setup(struct vfio_pci_device *device,
 	VFIO_ASSERT_NE(vaddr, MAP_FAILED);
 
 	region->vaddr = vaddr;
-	region->iova = (u64)vaddr;
+	region->iova = iova_allocator_alloc(iova_allocator, size);
 	region->size = size;
 
 	vfio_pci_dma_map(device, region);
@@ -44,6 +45,7 @@ static void region_teardown(struct vfio_pci_device *device,
 
 FIXTURE(vfio_pci_driver_test) {
 	struct vfio_pci_device *device;
+	struct iova_allocator *iova_allocator;
 	struct vfio_dma_region memcpy_region;
 	void *vaddr;
 	int msi_fd;
@@ -72,14 +74,15 @@ FIXTURE_SETUP(vfio_pci_driver_test)
 	struct vfio_pci_driver *driver;
 
 	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	self->iova_allocator = iova_allocator_init(self->device);
 
 	driver = &self->device->driver;
 
-	region_setup(self->device, &self->memcpy_region, SZ_1G);
-	region_setup(self->device, &driver->region, SZ_2M);
+	region_setup(self->device, self->iova_allocator, &self->memcpy_region, SZ_1G);
+	region_setup(self->device, self->iova_allocator, &driver->region, SZ_2M);
 
 	/* Any IOVA that doesn't overlap memcpy_region and driver->region. */
-	self->unmapped_iova = 8UL * SZ_1G;
+	self->unmapped_iova = iova_allocator_alloc(self->iova_allocator, SZ_1G);
 
 	vfio_pci_driver_init(self->device);
 	self->msi_fd = self->device->msi_eventfds[driver->msi];
@@ -108,6 +111,7 @@ FIXTURE_TEARDOWN(vfio_pci_driver_test)
 
 	region_teardown(self->device, &self->memcpy_region);
 	region_teardown(self->device, &driver->region);
+	iova_allocator_cleanup(self->iova_allocator);
 	vfio_pci_device_cleanup(self->device);
 }