mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-01-12 01:20:14 +00:00
selftests/mm/hmm-tests: partial unmap, mremap and anon_write tests
Add a partial unmap test case which munmaps memory while it is still resident in the device. Add tests exercising mremap on faulted-in memory (CPU and GPU) at various offsets and verify correctness. Update anon_write_child to read device memory after fork, verifying that this flow works in the kernel. Both THP and non-THP cases are updated.

Link: https://lkml.kernel.org/r/20251001065707.920170-15-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
519071529d
commit
24c2c5b8ff
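In short, the new partial-unmap case boils down to the sequence sketched below. This is only a condensed sketch of the diff that follows, reusing the existing hmm-tests fixture and helpers (self->fd, struct hmm_buffer, hmm_migrate_sys_to_dev(), ASSERT_EQ); the full test additionally loops over THP/non-THP variants and several unmap offsets.

/* Condensed sketch of the partial-unmap flow; see the full test in the diff below. */
unsigned long size = TWOMEG;
unsigned long npages = size >> self->page_shift;
unsigned long offset = 512 * ONEKB;	/* one of the offsets the test tries */
unsigned long i;
int *ptr;
int ret;

/* Fill the (2MB-aligned) anonymous buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
	ptr[i] = i;

/* Migrate the range to device-private memory via the dmirror driver. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);

/* Unmap 1MB in the middle while the pages still live in the device. */
munmap(buffer->ptr + offset, ONEMEG);

/* Reading the remaining pages faults them back to system memory;
   their contents must be unchanged. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
	if (i * sizeof(int) < offset || i * sizeof(int) >= offset + ONEMEG)
		ASSERT_EQ(ptr[i], i);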
@@ -50,6 +50,8 @@ enum {
HMM_COHERENCE_DEVICE_TWO,
};

#define ONEKB (1 << 10)
#define ONEMEG (1 << 20)
#define TWOMEG (1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX 64
@@ -525,6 +527,8 @@ TEST_F(hmm, anon_write_prot)
/*
* Check that a device writing an anonymous private mapping
* will copy-on-write if a child process inherits the mapping.
*
* Also verifies that, after fork(), device memory can be read by the child.
*/
TEST_F(hmm, anon_write_child)
{
@@ -532,72 +536,101 @@ TEST_F(hmm, anon_write_child)
unsigned long npages;
unsigned long size;
unsigned long i;
void *old_ptr;
void *map;
int *ptr;
pid_t pid;
int child_fd;
int ret;
int ret, use_thp, migrate;

npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;
for (migrate = 0; migrate < 2; ++migrate) {
for (use_thp = 0; use_thp < 2; ++use_thp) {
npages = ALIGN(use_thp ? TWOMEG : HMM_BUFFER_SIZE,
self->page_size) >> self->page_shift;
ASSERT_NE(npages, 0);
size = npages << self->page_shift;

buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);

buffer->fd = -1;
buffer->size = size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
buffer->fd = -1;
buffer->size = size * 2;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);

buffer->ptr = mmap(NULL, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);
buffer->ptr = mmap(NULL, size * 2,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);

/* Initialize buffer->ptr so we can tell if it is written. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;
old_ptr = buffer->ptr;
if (use_thp) {
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
ret = madvise(map, size, MADV_HUGEPAGE);
ASSERT_EQ(ret, 0);
buffer->ptr = map;
}

/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;
/* Initialize buffer->ptr so we can tell if it is written. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;

pid = fork();
if (pid == -1)
ASSERT_EQ(pid, 0);
if (pid != 0) {
waitpid(pid, &ret, 0);
ASSERT_EQ(WIFEXITED(ret), 1);
/* Initialize data that the device will write to buffer->ptr. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ptr[i] = -i;

/* Check that the parent's buffer did not change. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
return;
if (migrate) {
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);

}

pid = fork();
if (pid == -1)
ASSERT_EQ(pid, 0);
if (pid != 0) {
waitpid(pid, &ret, 0);
ASSERT_EQ(WIFEXITED(ret), 1);

/* Check that the parent's buffer did not change. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);

buffer->ptr = old_ptr;
hmm_buffer_free(buffer);
continue;
}

/* Check that we see the parent's values. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
if (!migrate) {
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
}

/* The child process needs its own mirror to its own mm. */
child_fd = hmm_open(0);
ASSERT_GE(child_fd, 0);

/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);

/* Check what the device wrote. */
if (!migrate) {
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);
}

close(child_fd);
exit(0);
}
}

/* Check that we see the parent's values. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);

/* The child process needs its own mirror to its own mm. */
child_fd = hmm_open(0);
ASSERT_GE(child_fd, 0);

/* Simulate a device writing system memory. */
ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);
ASSERT_EQ(buffer->faults, 1);

/* Check what the device wrote. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], -i);

close(child_fd);
exit(0);
}

/*
@@ -2289,6 +2322,165 @@ TEST_F(hmm, migrate_anon_huge_fault)
hmm_buffer_free(buffer);
}

/*
* Migrate memory and fault back to sysmem after partially unmapping.
*/
TEST_F(hmm, migrate_partial_unmap_fault)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size = TWOMEG;
unsigned long i;
void *old_ptr;
void *map;
int *ptr;
int ret, j, use_thp;
int offsets[] = { 0, 512 * ONEKB, ONEMEG };

for (use_thp = 0; use_thp < 2; ++use_thp) {
for (j = 0; j < ARRAY_SIZE(offsets); ++j) {
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);

buffer->fd = -1;
buffer->size = 2 * size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
memset(buffer->mirror, 0xFF, size);

buffer->ptr = mmap(NULL, 2 * size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);

npages = size >> self->page_shift;
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
if (use_thp)
ret = madvise(map, size, MADV_HUGEPAGE);
else
ret = madvise(map, size, MADV_NOHUGEPAGE);
ASSERT_EQ(ret, 0);
old_ptr = buffer->ptr;
buffer->ptr = map;

/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ptr[i] = i;

/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);

/* Check what the device read. */
for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);

munmap(buffer->ptr + offsets[j], ONEMEG);

/* Fault pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
if (i * sizeof(int) < offsets[j] ||
i * sizeof(int) >= offsets[j] + ONEMEG)
ASSERT_EQ(ptr[i], i);

buffer->ptr = old_ptr;
hmm_buffer_free(buffer);
}
}
}

TEST_F(hmm, migrate_remap_fault)
{
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size = TWOMEG;
unsigned long i;
void *old_ptr, *new_ptr = NULL;
void *map;
int *ptr;
int ret, j, use_thp, dont_unmap, before;
int offsets[] = { 0, 512 * ONEKB, ONEMEG };

for (before = 0; before < 2; ++before) {
for (dont_unmap = 0; dont_unmap < 2; ++dont_unmap) {
for (use_thp = 0; use_thp < 2; ++use_thp) {
for (j = 0; j < ARRAY_SIZE(offsets); ++j) {
int flags = MREMAP_MAYMOVE | MREMAP_FIXED;

if (dont_unmap)
flags |= MREMAP_DONTUNMAP;

buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);

buffer->fd = -1;
buffer->size = 8 * size;
buffer->mirror = malloc(size);
ASSERT_NE(buffer->mirror, NULL);
memset(buffer->mirror, 0xFF, size);

buffer->ptr = mmap(NULL, buffer->size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
buffer->fd, 0);
ASSERT_NE(buffer->ptr, MAP_FAILED);

npages = size >> self->page_shift;
map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
if (use_thp)
ret = madvise(map, size, MADV_HUGEPAGE);
else
ret = madvise(map, size, MADV_NOHUGEPAGE);
ASSERT_EQ(ret, 0);
old_ptr = buffer->ptr;
munmap(map + size, size * 2);
buffer->ptr = map;

/* Initialize buffer in system memory. */
for (i = 0, ptr = buffer->ptr;
i < size / sizeof(*ptr); ++i)
ptr[i] = i;

if (before) {
new_ptr = mremap((void *)map, size, size, flags,
map + size + offsets[j]);
ASSERT_NE(new_ptr, MAP_FAILED);
buffer->ptr = new_ptr;
}

/* Migrate memory to device. */
ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
ASSERT_EQ(ret, 0);
ASSERT_EQ(buffer->cpages, npages);

/* Check what the device read. */
for (i = 0, ptr = buffer->mirror;
i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);

if (!before) {
new_ptr = mremap((void *)map, size, size, flags,
map + size + offsets[j]);
ASSERT_NE(new_ptr, MAP_FAILED);
buffer->ptr = new_ptr;
}

/* Fault pages back to system memory and check them. */
for (i = 0, ptr = buffer->ptr;
i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);

munmap(new_ptr, size);
buffer->ptr = old_ptr;
hmm_buffer_free(buffer);
}
}
}
}
}

/*
* Migrate private anonymous huge page with allocation errors.
*/