Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2026-01-11 17:10:13 +00:00

Compare commits: 9b9e43704d...6da43bbeb6 (21 commits)
| SHA1 |
|---|
| 6da43bbeb6 |
| 01814e11e5 |
| aecba2e013 |
| 6a3cc1b749 |
| 161284b26f |
| 7564f3543c |
| c55a8e24cd |
| 9efb297c52 |
| 4b747cc628 |
| d323ad7396 |
| ce0e3c403e |
| a77fa0b922 |
| 7c44656ab3 |
| 4b93d211bb |
| 0fce758706 |
| 8821c8e80a |
| 6dd3b8a709 |
| 4fe5934db4 |
| 0b6c10cb84 |
| 66ededc694 |
| 62b9ca1706 |
@@ -196,7 +196,7 @@ int amd_detect_prefcore(bool *detected)
 			break;
 		}
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		u32 tmp;
 		int ret;
 
@@ -152,26 +152,49 @@ ATTRIBUTE_GROUPS(memory_range);
 
 static __init int add_boot_memory_ranges(void)
 {
-	struct kobject *pkobj, *kobj;
+	struct kobject *pkobj, *kobj, **kobjs;
 	int ret = -EINVAL;
-	char *name;
+	char name[16];
+	int i;
 
 	pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
 	if (!pkobj)
 		return -ENOMEM;
 
-	for (int i = 0; i < mrrm_mem_entry_num; i++) {
-		name = kasprintf(GFP_KERNEL, "range%d", i);
-		if (!name) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		kobj = kobject_create_and_add(name, pkobj);
-
-		ret = sysfs_create_groups(kobj, memory_range_groups);
-		if (ret)
-			return ret;
-	}
+	kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL);
+	if (!kobjs) {
+		kobject_put(pkobj);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < mrrm_mem_entry_num; i++) {
+		scnprintf(name, sizeof(name), "range%d", i);
+		kobj = kobject_create_and_add(name, pkobj);
+		if (!kobj) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+
+		ret = sysfs_create_groups(kobj, memory_range_groups);
+		if (ret) {
+			kobject_put(kobj);
+			goto cleanup;
+		}
+		kobjs[i] = kobj;
+	}
+
+	kfree(kobjs);
+	return 0;
+
+cleanup:
+	for (int j = 0; j < i; j++) {
+		if (kobjs[j]) {
+			sysfs_remove_groups(kobjs[j], memory_range_groups);
+			kobject_put(kobjs[j]);
+		}
+	}
+	kfree(kobjs);
+	kobject_put(pkobj);
 	return ret;
 }
 
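The rewritten add_boot_memory_ranges() fixes three problems in the old loop: kobject_create_and_add() was never NULL-checked, a sysfs_create_groups() failure returned without releasing pkobj or the children already created, and the per-iteration kasprintf() was needless for a name that fits a small fixed buffer. The kobjs[] array exists purely so the error path can unwind in reverse. A standalone sketch of the same record-then-unwind shape (userspace C, with hypothetical helpers standing in for the kobject API):

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for kobject_create_and_add()/kobject_put(). */
static void *create_child(int i) { return (i == 3) ? NULL : malloc(1); }
static void destroy_child(void *c) { free(c); }

static int add_all(int n)
{
	void **children;	/* tracking array, like kobjs[] above */
	int ret = 0;
	int i;

	children = calloc(n, sizeof(*children));
	if (!children)
		return -1;

	for (i = 0; i < n; i++) {
		children[i] = create_child(i);
		if (!children[i]) {
			ret = -1;
			goto cleanup;
		}
	}

	/* Success: children stay alive; only the tracking array is freed. */
	free(children);
	return 0;

cleanup:
	/* Unwind in reverse only what was actually created. */
	while (--i >= 0)
		destroy_child(children[i]);
	free(children);
	return ret;
}

int main(void)
{
	printf("add_all(5) = %d (child 3 fails, children 0-2 unwound)\n",
	       add_all(5));
	return 0;
}
```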
@@ -460,7 +460,7 @@ bool acpi_cpc_valid(void)
 	if (acpi_disabled)
 		return false;
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 		if (!cpc_ptr)
 			return false;
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
 	struct cpc_desc *cpc_ptr;
 	int cpu;
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
 		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
 		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -1435,7 +1435,7 @@ bool cppc_perf_ctrs_in_pcc(void)
 {
 	int cpu;
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		struct cpc_register_resource *ref_perf_reg;
 		struct cpc_desc *cpc_desc;
 
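All three CPPC hunks above (and the amd_detect_prefcore one) swap for_each_present_cpu() for for_each_online_cpu(). The likely motivation: per-CPU CPPC state such as cpc_desc_ptr is only populated when a CPU is actually brought up, so on systems booted with some present CPUs held offline (nosmt, maxcpus=), walking the present mask can hit NULL per-CPU pointers and make checks like acpi_cpc_valid() fail spuriously. A toy userspace model (not kernel code) of why the mask choice changes the answer:

```c
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Toy masks: all CPUs present, only the first four ever came online. */
static bool cpu_present[NR_CPUS] = { 1, 1, 1, 1, 1, 1, 1, 1 };
static bool cpu_online[NR_CPUS]  = { 1, 1, 1, 1, 0, 0, 0, 0 };
/* Per-CPU data that is only populated when a CPU comes online. */
static void *cpc_desc_ptr[NR_CPUS] = { "c0", "c1", "c2", "c3" };

static bool cpc_valid_present(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_present[cpu] && !cpc_desc_ptr[cpu])
			return false;	/* present-but-offline CPUs fail */
	return true;
}

static bool cpc_valid_online(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online[cpu] && !cpc_desc_ptr[cpu])
			return false;
	return true;
}

int main(void)
{
	printf("present-based check: %d, online-based check: %d\n",
	       cpc_valid_present(), cpc_valid_online());
	return 0;
}
```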
@@ -603,9 +603,6 @@ static bool turbo_is_disabled(void)
 {
 	u64 misc_en;
 
-	if (!cpu_feature_enabled(X86_FEATURE_IDA))
-		return true;
-
 	rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
 
 	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -2106,7 +2103,8 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+	    cpu_feature_enabled(X86_FEATURE_IDA))
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -2271,7 +2269,8 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+	    cpu_feature_enabled(X86_FEATURE_IDA))
 		val |= (u64)1 << 32;
 
 	return val;
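Both intel_pstate get_val() hunks gate the same operation: setting bit 32 of the value later written to MSR_IA32_PERF_CTL, which disengages turbo (IDA). On CPUs that do not advertise X86_FEATURE_IDA that bit is reserved, and setting a reserved MSR bit is generally rejected by the processor, which is presumably what the extra cpu_feature_enabled() check avoids; the early return dropped from turbo_is_disabled() is part of the same rework. A minimal sketch of the value construction, mirroring the hunks above (userspace C, helper name hypothetical):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assemble a PERF_CTL-style value: target ratio in bits [15:8], and the
 * IDA/turbo disengage bit (bit 32) only when the CPU supports IDA.
 */
static uint64_t perf_ctl_val(int pstate, bool no_turbo, bool turbo_disabled,
			     bool has_ida)
{
	uint64_t val = (uint64_t)pstate << 8;

	if (no_turbo && !turbo_disabled && has_ida)
		val |= (uint64_t)1 << 32;

	return val;
}

int main(void)
{
	printf("with IDA:    0x%llx\n",
	       (unsigned long long)perf_ctl_val(20, true, false, true));
	printf("without IDA: 0x%llx\n",
	       (unsigned long long)perf_ctl_val(20, true, false, false));
	return 0;
}
```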
@@ -12,9 +12,9 @@
  * Copyright (c) 2024 Cryolitia PukNgae
  */
 
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/hwmon.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -276,31 +276,6 @@ static int gpd_generic_read_rpm(void)
 	return (u16)high << 8 | low;
 }
 
-static void gpd_win4_init_ec(void)
-{
-	u8 chip_id, chip_ver;
-
-	gpd_ecram_read(0x2000, &chip_id);
-
-	if (chip_id == 0x55) {
-		gpd_ecram_read(0x1060, &chip_ver);
-		gpd_ecram_write(0x1060, chip_ver | 0x80);
-	}
-}
-
-static int gpd_win4_read_rpm(void)
-{
-	int ret;
-
-	ret = gpd_generic_read_rpm();
-
-	if (ret == 0)
-		// Re-init EC when speed is 0
-		gpd_win4_init_ec();
-
-	return ret;
-}
-
 static int gpd_wm2_read_rpm(void)
 {
 	for (u16 pwm_ctr_offset = GPD_PWM_CTR_OFFSET;
@@ -320,11 +295,10 @@ static int gpd_wm2_read_rpm(void)
 static int gpd_read_rpm(void)
 {
 	switch (gpd_driver_priv.drvdata->board) {
+	case win4_6800u:
 	case win_mini:
 	case duo:
 		return gpd_generic_read_rpm();
-	case win4_6800u:
-		return gpd_win4_read_rpm();
 	case win_max_2:
 		return gpd_wm2_read_rpm();
 	}
@@ -607,6 +581,28 @@ static struct hwmon_chip_info gpd_fan_chip_info = {
 	.info = gpd_fan_hwmon_channel_info
 };
 
+static void gpd_win4_init_ec(void)
+{
+	u8 chip_id, chip_ver;
+
+	gpd_ecram_read(0x2000, &chip_id);
+
+	if (chip_id == 0x55) {
+		gpd_ecram_read(0x1060, &chip_ver);
+		gpd_ecram_write(0x1060, chip_ver | 0x80);
+	}
+}
+
+static void gpd_init_ec(void)
+{
+	// The buggy firmware won't initialize EC properly on boot.
+	// Before its initialization, reading RPM will always return 0,
+	// and writing PWM will have no effect.
+	// Initialize it manually on driver load.
+	if (gpd_driver_priv.drvdata->board == win4_6800u)
+		gpd_win4_init_ec();
+}
+
 static int gpd_fan_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -634,6 +630,8 @@ static int gpd_fan_probe(struct platform_device *pdev)
 		return dev_err_probe(dev, PTR_ERR(hwdev),
 				     "Failed to register hwmon device\n");
 
+	gpd_init_ec();
+
 	return 0;
 }
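Net effect of the gpd-fan hunks: the Win4 EC workaround moves from a lazy re-init inside the read path to a one-time gpd_init_ec() call at probe, and win4_6800u then shares gpd_generic_read_rpm(). The lazy scheme could not distinguish a legitimately stopped fan (0 RPM) from an uninitialized EC, so an idle fan re-poked the EC on every read. A toy model (userspace, not the driver) of the difference:

```c
#include <stdbool.h>
#include <stdio.h>

static bool ec_initialized;
static int fan_rpm;		/* 0 is a legal value for a stopped fan */
static int ec_init_count;

static void ec_init(void) { ec_initialized = true; ec_init_count++; }

static int read_rpm_lazy(void)
{
	int rpm = ec_initialized ? fan_rpm : 0;

	if (rpm == 0)		/* can't tell "stopped" from "EC not ready" */
		ec_init();
	return rpm;
}

int main(void)
{
	fan_rpm = 0;		/* fan idle: every lazy read re-inits the EC */
	for (int i = 0; i < 3; i++)
		read_rpm_lazy();
	printf("lazy: EC initialized %d times\n", ec_init_count);

	ec_init_count = 0;
	ec_init();		/* probe-time init: exactly once */
	printf("eager: EC initialized %d times\n", ec_init_count);
	return 0;
}
```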
@@ -635,7 +635,7 @@ struct cmp_data {
 };
 
 /* Indicates the image size after compression */
-static atomic_t compressed_size = ATOMIC_INIT(0);
+static atomic64_t compressed_size = ATOMIC_INIT(0);
 
 /*
  * Compression function that runs in its own thread.
@@ -664,7 +664,7 @@ static int compress_threadfn(void *data)
 		d->ret = crypto_acomp_compress(d->cr);
 		d->cmp_len = d->cr->dlen;
 
-		atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);
+		atomic64_add(d->cmp_len, &compressed_size);
 		atomic_set_release(&d->stop, 1);
 		wake_up(&d->done);
 	}
@@ -689,14 +689,14 @@ static int save_compressed_image(struct swap_map_handle *handle,
 	ktime_t start;
 	ktime_t stop;
 	size_t off;
-	unsigned thr, run_threads, nr_threads;
+	unsigned int thr, run_threads, nr_threads;
 	unsigned char *page = NULL;
 	struct cmp_data *data = NULL;
 	struct crc_data *crc = NULL;
 
 	hib_init_batch(&hb);
 
-	atomic_set(&compressed_size, 0);
+	atomic64_set(&compressed_size, 0);
 
 	/*
 	 * We'll limit the number of threads for compression to limit memory
@@ -877,11 +877,14 @@ out_finish:
 	stop = ktime_get();
 	if (!ret)
 		ret = err2;
-	if (!ret)
-		pr_info("Image saving done\n");
-	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
-	pr_info("Image size after compression: %d kbytes\n",
-		(atomic_read(&compressed_size) / 1024));
+	if (!ret) {
+		swsusp_show_speed(start, stop, nr_to_write, "Wrote");
+		pr_info("Image size after compression: %lld kbytes\n",
+			(atomic64_read(&compressed_size) / 1024));
+		pr_info("Image saving done\n");
+	} else {
+		pr_err("Image saving failed: %d\n", ret);
+	}
 
 out_clean:
 	hib_finish_batch(&hb);
@@ -899,7 +902,8 @@ out_clean:
 		}
 		vfree(data);
 	}
-	if (page) free_page((unsigned long)page);
+	if (page)
+		free_page((unsigned long)page);
 
 	return ret;
 }
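The swap.c hunks exist because atomic_t is 32-bit: once an image compresses to 2 GiB or more, the byte counter wraps, and the old atomic_set(atomic_read() + len) pair was not even an atomic increment, so concurrent compressor threads could lose updates. atomic64_add() fixes both, and the printk format moves from %d to %lld to match. A minimal userspace illustration, with C11 atomics standing in for the kernel API:

```c
#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic int32_t sz32 = 0;	/* analogous to atomic_t */
	_Atomic int64_t sz64 = 0;	/* analogous to atomic64_t */
	int64_t chunk = 32 * 1024 * 1024;

	/* Accumulate 96 x 32 MiB = 3 GiB of "compressed" data. */
	for (int i = 0; i < 96; i++) {
		atomic_fetch_add(&sz32, (int32_t)chunk);
		atomic_fetch_add(&sz64, chunk);
	}

	printf("32-bit total: %" PRId32 " bytes (wrapped negative)\n",
	       atomic_load(&sz32));
	printf("64-bit total: %" PRId64 " bytes\n", atomic_load(&sz64));
	return 0;
}
```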
@@ -4,9 +4,12 @@
 #include <fcntl.h>
 #include <string.h>
-#include <linux/vfio.h>
 
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
 #include <linux/list.h>
 #include <linux/pci_regs.h>
+#include <linux/vfio.h>
 
 #include "../../../kselftest.h"
 
@@ -185,6 +188,13 @@ struct vfio_pci_device {
 	struct vfio_pci_driver driver;
 };
 
+struct iova_allocator {
+	struct iommu_iova_range *ranges;
+	u32 nranges;
+	u32 range_idx;
+	u64 range_offset;
+};
+
 /*
  * Return the BDF string of the device that the test should use.
  *
@@ -206,6 +216,13 @@ struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_
 void vfio_pci_device_cleanup(struct vfio_pci_device *device);
 void vfio_pci_device_reset(struct vfio_pci_device *device);
 
+struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
+					      u32 *nranges);
+
+struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device);
+void iova_allocator_cleanup(struct iova_allocator *allocator);
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
+
 int __vfio_pci_dma_map(struct vfio_pci_device *device,
 		       struct vfio_dma_region *region);
 int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
@@ -12,11 +12,12 @@
 #include <sys/mman.h>
 
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
 #include <linux/limits.h>
 #include <linux/mman.h>
+#include <linux/overflow.h>
 #include <linux/types.h>
 #include <linux/vfio.h>
-#include <linux/iommufd.h>
 
 #include "../../../kselftest.h"
 #include <vfio_util.h>
@@ -29,6 +30,249 @@
 	VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
 } while (0)
 
+static struct vfio_info_cap_header *next_cap_hdr(void *buf, u32 bufsz,
+						 u32 *cap_offset)
+{
+	struct vfio_info_cap_header *hdr;
+
+	if (!*cap_offset)
+		return NULL;
+
+	VFIO_ASSERT_LT(*cap_offset, bufsz);
+	VFIO_ASSERT_GE(bufsz - *cap_offset, sizeof(*hdr));
+
+	hdr = (struct vfio_info_cap_header *)((u8 *)buf + *cap_offset);
+	*cap_offset = hdr->next;
+
+	return hdr;
+}
+
+static struct vfio_info_cap_header *vfio_iommu_info_cap_hdr(struct vfio_iommu_type1_info *info,
+							    u16 cap_id)
+{
+	struct vfio_info_cap_header *hdr;
+	u32 cap_offset = info->cap_offset;
+	u32 max_depth;
+	u32 depth = 0;
+
+	if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
+		return NULL;
+
+	if (cap_offset)
+		VFIO_ASSERT_GE(cap_offset, sizeof(*info));
+
+	max_depth = (info->argsz - sizeof(*info)) / sizeof(*hdr);
+
+	while ((hdr = next_cap_hdr(info, info->argsz, &cap_offset))) {
+		depth++;
+		VFIO_ASSERT_LE(depth, max_depth, "Capability chain contains a cycle\n");
+
+		if (hdr->id == cap_id)
+			return hdr;
+	}
+
+	return NULL;
+}
+
+/* Return buffer including capability chain, if present. Free with free() */
+static struct vfio_iommu_type1_info *vfio_iommu_get_info(struct vfio_pci_device *device)
+{
+	struct vfio_iommu_type1_info *info;
+
+	info = malloc(sizeof(*info));
+	VFIO_ASSERT_NOT_NULL(info);
+
+	*info = (struct vfio_iommu_type1_info) {
+		.argsz = sizeof(*info),
+	};
+
+	ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
+	VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
+	info = realloc(info, info->argsz);
+	VFIO_ASSERT_NOT_NULL(info);
+
+	ioctl_assert(device->container_fd, VFIO_IOMMU_GET_INFO, info);
+	VFIO_ASSERT_GE(info->argsz, sizeof(*info));
+
+	return info;
+}
+
+/*
+ * Return iova ranges for the device's container. Normalize vfio_iommu_type1 to
+ * report iommufd's iommu_iova_range. Free with free().
+ */
+static struct iommu_iova_range *vfio_iommu_iova_ranges(struct vfio_pci_device *device,
+						       u32 *nranges)
+{
+	struct vfio_iommu_type1_info_cap_iova_range *cap_range;
+	struct vfio_iommu_type1_info *info;
+	struct vfio_info_cap_header *hdr;
+	struct iommu_iova_range *ranges = NULL;
+
+	info = vfio_iommu_get_info(device);
+	hdr = vfio_iommu_info_cap_hdr(info, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
+	VFIO_ASSERT_NOT_NULL(hdr);
+
+	cap_range = container_of(hdr, struct vfio_iommu_type1_info_cap_iova_range, header);
+	VFIO_ASSERT_GT(cap_range->nr_iovas, 0);
+
+	ranges = calloc(cap_range->nr_iovas, sizeof(*ranges));
+	VFIO_ASSERT_NOT_NULL(ranges);
+
+	for (u32 i = 0; i < cap_range->nr_iovas; i++) {
+		ranges[i] = (struct iommu_iova_range){
+			.start = cap_range->iova_ranges[i].start,
+			.last = cap_range->iova_ranges[i].end,
+		};
+	}
+
+	*nranges = cap_range->nr_iovas;
+
+	free(info);
+	return ranges;
+}
+
+/* Return iova ranges of the device's IOAS. Free with free() */
+static struct iommu_iova_range *iommufd_iova_ranges(struct vfio_pci_device *device,
+						    u32 *nranges)
+{
+	struct iommu_iova_range *ranges;
+	int ret;
+
+	struct iommu_ioas_iova_ranges query = {
+		.size = sizeof(query),
+		.ioas_id = device->ioas_id,
+	};
+
+	ret = ioctl(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+	VFIO_ASSERT_EQ(ret, -1);
+	VFIO_ASSERT_EQ(errno, EMSGSIZE);
+	VFIO_ASSERT_GT(query.num_iovas, 0);
+
+	ranges = calloc(query.num_iovas, sizeof(*ranges));
+	VFIO_ASSERT_NOT_NULL(ranges);
+
+	query.allowed_iovas = (uintptr_t)ranges;
+
+	ioctl_assert(device->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
+	*nranges = query.num_iovas;
+
+	return ranges;
+}
+
+static int iova_range_comp(const void *a, const void *b)
+{
+	const struct iommu_iova_range *ra = a, *rb = b;
+
+	if (ra->start < rb->start)
+		return -1;
+
+	if (ra->start > rb->start)
+		return 1;
+
+	return 0;
+}
+
+/* Return sorted IOVA ranges of the device. Free with free(). */
+struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
+					      u32 *nranges)
+{
+	struct iommu_iova_range *ranges;
+
+	if (device->iommufd)
+		ranges = iommufd_iova_ranges(device, nranges);
+	else
+		ranges = vfio_iommu_iova_ranges(device, nranges);
+
+	if (!ranges)
+		return NULL;
+
+	VFIO_ASSERT_GT(*nranges, 0);
+
+	/* Sort and check that ranges are sane and non-overlapping */
+	qsort(ranges, *nranges, sizeof(*ranges), iova_range_comp);
+	VFIO_ASSERT_LT(ranges[0].start, ranges[0].last);
+
+	for (u32 i = 1; i < *nranges; i++) {
+		VFIO_ASSERT_LT(ranges[i].start, ranges[i].last);
+		VFIO_ASSERT_LT(ranges[i - 1].last, ranges[i].start);
+	}
+
+	return ranges;
+}
+
+struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device)
+{
+	struct iova_allocator *allocator;
+	struct iommu_iova_range *ranges;
+	u32 nranges;
+
+	ranges = vfio_pci_iova_ranges(device, &nranges);
+	VFIO_ASSERT_NOT_NULL(ranges);
+
+	allocator = malloc(sizeof(*allocator));
+	VFIO_ASSERT_NOT_NULL(allocator);
+
+	*allocator = (struct iova_allocator){
+		.ranges = ranges,
+		.nranges = nranges,
+		.range_idx = 0,
+		.range_offset = 0,
+	};
+
+	return allocator;
+}
+
+void iova_allocator_cleanup(struct iova_allocator *allocator)
+{
+	free(allocator->ranges);
+	free(allocator);
+}
+
+iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size)
+{
+	VFIO_ASSERT_GT(size, 0, "Invalid size arg, zero\n");
+	VFIO_ASSERT_EQ(size & (size - 1), 0, "Invalid size arg, non-power-of-2\n");
+
+	for (;;) {
+		struct iommu_iova_range *range;
+		iova_t iova, last;
+
+		VFIO_ASSERT_LT(allocator->range_idx, allocator->nranges,
+			       "IOVA allocator out of space\n");
+
+		range = &allocator->ranges[allocator->range_idx];
+		iova = range->start + allocator->range_offset;
+
+		/* Check for sufficient space at the current offset */
+		if (check_add_overflow(iova, size - 1, &last) ||
+		    last > range->last)
+			goto next_range;
+
+		/* Align iova to size */
+		iova = last & ~(size - 1);
+
+		/* Check for sufficient space at the aligned iova */
+		if (check_add_overflow(iova, size - 1, &last) ||
+		    last > range->last)
+			goto next_range;
+
+		if (last == range->last) {
+			allocator->range_idx++;
+			allocator->range_offset = 0;
+		} else {
+			allocator->range_offset = last - range->start + 1;
+		}
+
+		return iova;
+
+next_range:
+		allocator->range_idx++;
+		allocator->range_offset = 0;
+	}
+}
+
 iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
 {
 	struct vfio_dma_region *region;
 
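Two details in the new selftest library code are worth calling out. First, vfio_iommu_info_cap_hdr() bounds its walk: every valid capability header consumes at least sizeof(*hdr) bytes of the argsz buffer, so any chain longer than (argsz - sizeof(*info)) / sizeof(*hdr) links must be revisiting a header, and the max_depth assert turns a malformed (cyclic) chain into a test failure rather than an infinite loop. Second, iova_allocator_alloc() gets natural alignment for power-of-two sizes almost for free: after confirming that iova + size - 1 fits, last & ~(size - 1) rounds that last byte down to a size boundary, which is exactly align_up(iova, size). A standalone sketch of that identity:

```c
#include <stdint.h>
#include <stdio.h>

/* Round iova up to the next multiple of size (size must be a power of two),
 * the same way iova_allocator_alloc() does: take the last byte of a
 * size-sized window starting at iova, then mask off the low bits.
 */
static uint64_t align_up_pow2(uint64_t iova, uint64_t size)
{
	uint64_t last = iova + size - 1;	/* caller checks for overflow */

	return last & ~(size - 1);
}

int main(void)
{
	printf("0x%llx 0x%llx 0x%llx\n",
	       (unsigned long long)align_up_pow2(5, 4),		/* 0x8 */
	       (unsigned long long)align_up_pow2(8, 4),		/* 0x8 */
	       (unsigned long long)align_up_pow2(0x1001, 0x1000));	/* 0x2000 */
	return 0;
}
```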
@@ -3,6 +3,8 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
+#include <uapi/linux/types.h>
+#include <linux/iommufd.h>
 #include <linux/limits.h>
 #include <linux/mman.h>
 #include <linux/sizes.h>
@@ -93,6 +95,7 @@ static int iommu_mapping_get(const char *bdf, u64 iova,
 
 FIXTURE(vfio_dma_mapping_test) {
 	struct vfio_pci_device *device;
+	struct iova_allocator *iova_allocator;
 };
 
 FIXTURE_VARIANT(vfio_dma_mapping_test) {
@@ -117,10 +120,12 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB |
 FIXTURE_SETUP(vfio_dma_mapping_test)
 {
 	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	self->iova_allocator = iova_allocator_init(self->device);
 }
 
 FIXTURE_TEARDOWN(vfio_dma_mapping_test)
 {
+	iova_allocator_cleanup(self->iova_allocator);
 	vfio_pci_device_cleanup(self->device);
 }
@@ -142,7 +147,7 @@ TEST_F(vfio_dma_mapping_test, dma_map_unmap)
 	else
 		ASSERT_NE(region.vaddr, MAP_FAILED);
 
-	region.iova = (u64)region.vaddr;
+	region.iova = iova_allocator_alloc(self->iova_allocator, size);
 	region.size = size;
 
 	vfio_pci_dma_map(self->device, &region);
@@ -219,7 +224,10 @@ FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
 FIXTURE_SETUP(vfio_dma_map_limit_test)
 {
 	struct vfio_dma_region *region = &self->region;
+	struct iommu_iova_range *ranges;
 	u64 region_size = getpagesize();
+	iova_t last_iova;
+	u32 nranges;
 
 	/*
 	 * Over-allocate mmap by double the size to provide enough backing vaddr
@@ -232,8 +240,13 @@ FIXTURE_SETUP(vfio_dma_map_limit_test)
 			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 	ASSERT_NE(region->vaddr, MAP_FAILED);
 
-	/* One page prior to the end of address space */
-	region->iova = ~(iova_t)0 & ~(region_size - 1);
+	ranges = vfio_pci_iova_ranges(self->device, &nranges);
+	VFIO_ASSERT_NOT_NULL(ranges);
+	last_iova = ranges[nranges - 1].last;
+	free(ranges);
+
+	/* One page prior to the last iova */
+	region->iova = last_iova & ~(region_size - 1);
 	region->size = region_size;
 }
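The map-limit fixture stops assuming the very top of the 64-bit address space is mappable; IOMMUs commonly reserve windows (the x86 MSI range, for example), and the IOVA-range queries report only the usable gaps, so the test now derives its boundary IOVA from the last reported range. A sketch of the computation on a made-up range layout (helper name hypothetical):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t iova_t;

struct iommu_iova_range {
	iova_t start;
	iova_t last;
};

/* Highest page-aligned IOVA inside the last (sorted) usable range. */
static iova_t last_mappable_page(const struct iommu_iova_range *ranges,
				 unsigned int nranges, iova_t page_size)
{
	return ranges[nranges - 1].last & ~(page_size - 1);
}

int main(void)
{
	/* Example layout: a reserved hole, and a top well below ~0ULL. */
	struct iommu_iova_range ranges[] = {
		{ 0x0,        0xfedfffff },
		{ 0xfef00000, 0xffffffffff },
	};

	printf("0x%llx\n",
	       (unsigned long long)last_mappable_page(ranges, 2, 4096));
	return 0;
}
```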
@@ -276,6 +289,7 @@ TEST_F(vfio_dma_map_limit_test, overflow)
 	struct vfio_dma_region *region = &self->region;
 	int rc;
 
+	region->iova = ~(iova_t)0 & ~(region->size - 1);
 	region->size = self->mmap_size;
 
 	rc = __vfio_pci_dma_map(self->device, region);
@@ -19,6 +19,7 @@ static const char *device_bdf;
 } while (0)
 
 static void region_setup(struct vfio_pci_device *device,
+			 struct iova_allocator *iova_allocator,
 			 struct vfio_dma_region *region, u64 size)
 {
 	const int flags = MAP_SHARED | MAP_ANONYMOUS;
@@ -29,7 +30,7 @@ static void region_setup(struct vfio_pci_device *device,
 	VFIO_ASSERT_NE(vaddr, MAP_FAILED);
 
 	region->vaddr = vaddr;
-	region->iova = (u64)vaddr;
+	region->iova = iova_allocator_alloc(iova_allocator, size);
 	region->size = size;
 
 	vfio_pci_dma_map(device, region);
@@ -44,6 +45,7 @@ static void region_teardown(struct vfio_pci_device *device,
 
 FIXTURE(vfio_pci_driver_test) {
 	struct vfio_pci_device *device;
+	struct iova_allocator *iova_allocator;
 	struct vfio_dma_region memcpy_region;
 	void *vaddr;
 	int msi_fd;
@@ -72,14 +74,15 @@ FIXTURE_SETUP(vfio_pci_driver_test)
 	struct vfio_pci_driver *driver;
 
 	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
+	self->iova_allocator = iova_allocator_init(self->device);
 
 	driver = &self->device->driver;
 
-	region_setup(self->device, &self->memcpy_region, SZ_1G);
-	region_setup(self->device, &driver->region, SZ_2M);
+	region_setup(self->device, self->iova_allocator, &self->memcpy_region, SZ_1G);
+	region_setup(self->device, self->iova_allocator, &driver->region, SZ_2M);
 
 	/* Any IOVA that doesn't overlap memcpy_region and driver->region. */
-	self->unmapped_iova = 8UL * SZ_1G;
+	self->unmapped_iova = iova_allocator_alloc(self->iova_allocator, SZ_1G);
 
 	vfio_pci_driver_init(self->device);
 	self->msi_fd = self->device->msi_eventfds[driver->msi];
@@ -108,6 +111,7 @@ FIXTURE_TEARDOWN(vfio_pci_driver_test)
 	region_teardown(self->device, &self->memcpy_region);
 	region_teardown(self->device, &driver->region);
 
+	iova_allocator_cleanup(self->iova_allocator);
 	vfio_pci_device_cleanup(self->device);
 }