1
0
mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git synced 2026-01-11 17:10:13 +00:00

selftests/mm/hmm-tests: new tests for zone device THP migration

Add new tests for migrating anon THP pages, including anon_huge,
anon_huge_zero and error cases involving forced splitting of pages during
migration.

Link: https://lkml.kernel.org/r/20251001065707.920170-14-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Balbir Singh 2025-10-01 16:57:04 +10:00 committed by Andrew Morton
parent aa3ade4295
commit 519071529d

View File

@ -2055,4 +2055,414 @@ TEST_F(hmm, hmm_cow_in_device)
hmm_buffer_free(buffer);
}
/*
 * Migrate an empty (never written) private anonymous huge page to the
 * device and verify that the device sees zero-filled memory.
 */
TEST_F(hmm, migrate_anon_huge_empty)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size = TWOMEG;
	unsigned long idx;
	void *orig_ptr;
	void *aligned;
	int *scan;
	int ret;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = 2 * size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);
	memset(buffer->mirror, 0xFF, size);

	/* Over-allocate so a size-aligned huge-page region exists in the VMA. */
	buffer->ptr = mmap(NULL, 2 * size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	npages = size >> self->page_shift;
	aligned = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(aligned, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	orig_ptr = buffer->ptr;
	buffer->ptr = aligned;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* The device must have read all zeroes from the untouched page. */
	for (idx = 0, scan = buffer->mirror; idx < size / sizeof(*scan); ++idx)
		ASSERT_EQ(scan[idx], 0);

	/* Restore the original pointer so the whole mapping is freed. */
	buffer->ptr = orig_ptr;
	hmm_buffer_free(buffer);
}
/*
 * Migrate the private anonymous huge zero page to the device, then fault
 * it back, verifying that both sides read as all zeroes.
 */
TEST_F(hmm, migrate_anon_huge_zero)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size = TWOMEG;
	unsigned long idx;
	void *orig_ptr;
	void *aligned;
	int *scan;
	int first;
	int ret;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = 2 * size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);
	memset(buffer->mirror, 0xFF, size);

	/* Over-allocate so a size-aligned huge-page region exists in the VMA. */
	buffer->ptr = mmap(NULL, 2 * size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	npages = size >> self->page_shift;
	aligned = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(aligned, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	orig_ptr = buffer->ptr;
	buffer->ptr = aligned;

	/* A read fault on the fresh read-only region installs the zero page. */
	first = *(int *)buffer->ptr;
	ASSERT_EQ(first, 0);

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (idx = 0, scan = buffer->mirror; idx < size / sizeof(*scan); ++idx)
		ASSERT_EQ(scan[idx], 0);

	/* Fault pages back to system memory and check them. */
	for (idx = 0, scan = buffer->ptr; idx < size / sizeof(*scan); ++idx) {
		ASSERT_EQ(scan[idx], 0);
		/* One failing word implies ~500,000 more; stop the spam. */
		if (scan[idx] != 0)
			break;
	}

	buffer->ptr = orig_ptr;
	hmm_buffer_free(buffer);
}
/*
 * Migrate a written private anonymous huge page to the device, verify the
 * device copy, then MADV_FREE the source range while the data resides on
 * the device.
 */
TEST_F(hmm, migrate_anon_huge_free)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size = TWOMEG;
	unsigned long idx;
	void *orig_ptr;
	void *aligned;
	int *scan;
	int ret;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = 2 * size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);
	memset(buffer->mirror, 0xFF, size);

	/* Over-allocate so a size-aligned huge-page region exists in the VMA. */
	buffer->ptr = mmap(NULL, 2 * size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	npages = size >> self->page_shift;
	aligned = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(aligned, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	orig_ptr = buffer->ptr;
	buffer->ptr = aligned;

	/* Initialize buffer in system memory. */
	for (idx = 0, scan = buffer->ptr; idx < size / sizeof(*scan); ++idx)
		scan[idx] = idx;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (idx = 0, scan = buffer->mirror; idx < size / sizeof(*scan); ++idx)
		ASSERT_EQ(scan[idx], idx);

	/* Try freeing it while the pages live in device memory. */
	ret = madvise(aligned, size, MADV_FREE);
	ASSERT_EQ(ret, 0);

	buffer->ptr = orig_ptr;
	hmm_buffer_free(buffer);
}
/*
 * Migrate a written private anonymous huge page to the device, verify the
 * device copy, then fault every page back to system memory and re-check
 * the contents.
 */
TEST_F(hmm, migrate_anon_huge_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size = TWOMEG;
	unsigned long idx;
	void *orig_ptr;
	void *aligned;
	int *scan;
	int ret;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = 2 * size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);
	memset(buffer->mirror, 0xFF, size);

	/* Over-allocate so a size-aligned huge-page region exists in the VMA. */
	buffer->ptr = mmap(NULL, 2 * size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	npages = size >> self->page_shift;
	aligned = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(aligned, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	orig_ptr = buffer->ptr;
	buffer->ptr = aligned;

	/* Initialize buffer in system memory. */
	for (idx = 0, scan = buffer->ptr; idx < size / sizeof(*scan); ++idx)
		scan[idx] = idx;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (idx = 0, scan = buffer->mirror; idx < size / sizeof(*scan); ++idx)
		ASSERT_EQ(scan[idx], idx);

	/* Fault pages back to system memory and check them. */
	for (idx = 0, scan = buffer->ptr; idx < size / sizeof(*scan); ++idx)
		ASSERT_EQ(scan[idx], idx);

	buffer->ptr = orig_ptr;
	hmm_buffer_free(buffer);
}
/*
 * Migrate private anonymous huge page with allocation errors.
 *
 * Exercises the forced-split error paths in both directions:
 *  1. sys->dev migration with HMM_DMIRROR_FLAG_FAIL_ALLOC set, so the
 *     driver's huge-page allocation fails and the THP must be split and
 *     migrated as base pages;
 *  2. dev->sys fault-back with the same flag, forcing a split of a THP
 *     resident in device memory.
 * The exact ordering of flag-set / migrate / remap calls below is the
 * substance of the test — do not reorder.
 */
TEST_F(hmm, migrate_anon_huge_err)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	size = TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = 2 * size;
	/* Mirror covers the full VMA (2 * size) here, unlike earlier tests. */
	buffer->mirror = malloc(2 * size);
	ASSERT_NE(buffer->mirror, NULL);
	memset(buffer->mirror, 0xFF, 2 * size);

	/* Over-allocate so a size-aligned huge-page region exists in the VMA. */
	old_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
	ASSERT_NE(old_ptr, MAP_FAILED);

	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)old_ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	buffer->ptr = map;

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device but force a THP allocation error. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
			      HMM_DMIRROR_FLAG_FAIL_ALLOC);
	ASSERT_EQ(ret, 0);
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) {
		ASSERT_EQ(ptr[i], i);
		/* One mismatch implies the rest fail too; avoid assert spam. */
		if (ptr[i] != i)
			break;
	}

	/* Try faulting back a single (PAGE_SIZE) page. */
	ptr = buffer->ptr;
	ASSERT_EQ(ptr[2048], 2048);

	/* unmap and remap the region to reset things. */
	ret = munmap(old_ptr, 2 * size);
	ASSERT_EQ(ret, 0);
	old_ptr = mmap(NULL, 2 * size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
	ASSERT_NE(old_ptr, MAP_FAILED);
	map = (void *)ALIGN((uintptr_t)old_ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	buffer->ptr = map;

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate THP to device (no error injected on this leg). */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/*
	 * Force an allocation error when faulting back a THP resident in the
	 * device.
	 */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
			      HMM_DMIRROR_FLAG_FAIL_ALLOC);
	ASSERT_EQ(ret, 0);
	ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);

	/* The data must survive the forced split on the way back. */
	ptr = buffer->ptr;
	ASSERT_EQ(ptr[2048], 2048);

	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}
/*
 * Migrate private anonymous huge zero page with allocation errors.
 *
 * Forces HMM_DMIRROR_FLAG_FAIL_ALLOC on the sys->dev migration of the huge
 * zero page (so the zero THP must be split), then remaps the region and
 * repeats the forced-failure migration, checking that every word still
 * reads as zero on both the device and the faulted-back system copies.
 */
TEST_F(hmm, migrate_anon_huge_zero_err)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	size = TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = 2 * size;
	buffer->mirror = malloc(2 * size);
	ASSERT_NE(buffer->mirror, NULL);
	memset(buffer->mirror, 0xFF, 2 * size);

	/* Over-allocate so a size-aligned huge-page region exists in the VMA. */
	old_ptr = mmap(NULL, 2 * size, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
	ASSERT_NE(old_ptr, MAP_FAILED);

	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)old_ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	buffer->ptr = map;

	/* Migrate memory to device but force a THP allocation error. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
			      HMM_DMIRROR_FLAG_FAIL_ALLOC);
	ASSERT_EQ(ret, 0);
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Try faulting back a single (PAGE_SIZE) page. */
	ptr = buffer->ptr;
	ASSERT_EQ(ptr[2048], 0);

	/* unmap and remap the region to reset things. */
	ret = munmap(old_ptr, 2 * size);
	ASSERT_EQ(ret, 0);
	old_ptr = mmap(NULL, 2 * size, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0);
	ASSERT_NE(old_ptr, MAP_FAILED);
	map = (void *)ALIGN((uintptr_t)old_ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	buffer->ptr = map;

	/*
	 * Initialize buffer in system memory (a read fault on the fresh
	 * read-only mapping installs the huge zero page).
	 *
	 * Fix: refresh ptr from the new mapping first.  The previous value
	 * of ptr still pointed into the region munmap()ed above, so
	 * dereferencing it was a use-after-unmap that only "worked" if the
	 * kernel happened to reuse the same address for the new mapping.
	 */
	ptr = buffer->ptr;
	ret = ptr[0];
	ASSERT_EQ(ret, 0);

	/* Migrate memory to device but force a THP allocation error. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_FLAGS, buffer,
			      HMM_DMIRROR_FLAG_FAIL_ALLOC);
	ASSERT_EQ(ret, 0);
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Fault the device memory back and check it. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}
/* Expands to the kselftest harness main() that runs every TEST_F() above. */
TEST_HARNESS_MAIN