drm fixes for 6.19-rc4

shmem:
- docs and MODULE_LICENSE fix

xe:
- Ensure svm device memory is idle before migration completes
- Fix a SVM debug printout
- Use READ_ONCE() / WRITE_ONCE() for g2h_fence

i915:
- Fix eb_lookup_vmas() failure path

nouveau:
- fix prepare_fb warnings

imagination:
- prevent export of protected objects

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmlXQY4ACgkQDHTzWXnE
hr7LEQ//bl5D0Dk2AGef3J6ZdE5aj+BfPYhcZuxwREtcsoGFZ3yL3YyWqqYVDHQh
V3La32pZaI6MdjUsWnRBJvDHYyXnnIDZDMi0pTqcFrYIOHgS1Rvp3ONF8sL1Pq5N
Q09j6DniyD8o2S27qbRbiUD6jYrfxFBZ4l6HGpoz8bkODnzMk6am1LwPd22KjRta
Ddti6Mhzj5ZmYsuWBKDCT8bBcQjQnlvEuxySakctaj4wX8WpCRPXLluPjkLrikDZ
CyywAoZM6c8DesGOMO4gy2GWxtYdprk9oyl9oEL3NFSI6R65RW1ivgmyq08DADQ9
jgPr6FHvIwgfUOVsvyaLZrEt/hvdAX0nYYaMvwi1HsdoEke8wPkbV161/2AQtu9R
TWMfh4zvGni7bu3GsSv6SvUjCqsXQZ1i/tCNU45kqlqAaHN4m6rK36TmMytenZik
qinjHeelvYZhkxfo+xlCl5CestDHdWqfAWWvkJ0kNuedhyy+M+6aOhX1h45F1jJ4
F8I3ZXERLbLIvVm0IaXwZO9VEOiDJirF932of4PpcYHp9sp3YPJcB3SBCyfsFOtv
rINN/h9wyp29H0UCCSW4k6WGPwmEV4oEtYkrkesqFnrJy97R3XrUC6/zqPtV4nnN
d3F6cmuleLm+4VZsfQ7zknXb4cIt5kYCapd7YBtEn1BkU0Vk/+M=
=bYco
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2026-01-02' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Happy New Year, jetlagged fixes from me, still pretty quiet, xe is
  most of this, with i915/nouveau/imagination fixes and some shmem
  cleanups.

  shmem:
   - docs and MODULE_LICENSE fix

  xe:
   - Ensure svm device memory is idle before migration completes
   - Fix a SVM debug printout
   - Use READ_ONCE() / WRITE_ONCE() for g2h_fence

  i915:
   - Fix eb_lookup_vmas() failure path

  nouveau:
   - fix prepare_fb warnings

  imagination:
   - prevent export of protected objects"

* tag 'drm-fixes-2026-01-02' of https://gitlab.freedesktop.org/drm/kernel:
  drm/i915/gem: Zero-initialize the eb.vma array in i915_gem_do_execbuffer
  drm/xe/guc: READ/WRITE_ONCE g2h_fence->done
  drm/pagemap, drm/xe: Ensure that the devmem allocation is idle before use
  drm/xe/svm: Fix a debug printout
  drm/gem-shmem: Fix the MODULE_LICENSE() string
  drm/gem-shmem: Fix typos in documentation
  drm/nouveau/dispnv50: Don't call drm_atomic_get_crtc_state() in prepare_fb
  drm/imagination: Disallow exporting of PM/FW protected objects
@@ -96,7 +96,8 @@ err_release:
/**
* drm_gem_shmem_init - Initialize an allocated object.
* @dev: DRM device
* @obj: The allocated shmem GEM object.
* @shmem: The allocated shmem GEM object.
* @size: Buffer size in bytes
*
* Returns:
* 0 on success, or a negative error code on failure.

@@ -895,4 +896,4 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
MODULE_LICENSE("GPL");
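Note: the hunk above replaces the legacy license ident. Per Documentation/process/license-rules.rst, MODULE_LICENSE("GPL") and the historical "GPL v2" string both express GPL v2, and plain "GPL" is the form new code should use. A minimal, hypothetical module skeleton (not part of this patch) using the preferred string:

#include <linux/init.h>
#include <linux/module.h>

static int __init license_demo_init(void)
{
	return 0;
}

static void __exit license_demo_exit(void)
{
}

module_init(license_demo_init);
module_exit(license_demo_exit);

MODULE_DESCRIPTION("Example of the preferred MODULE_LICENSE string");
/* "GPL" and the legacy "GPL v2" ident both mean GPL v2; new code uses "GPL". */
MODULE_LICENSE("GPL");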
@@ -3,6 +3,7 @@
* Copyright © 2024-2025 Intel Corporation
*/
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>

@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
err = ops->copy_to_devmem(pages, pagemap_addr, npages);
err = ops->copy_to_devmem(pages, pagemap_addr, npages,
devmem_allocation->pre_migrate_fence);
if (err)
goto err_finalize;
dma_fence_put(devmem_allocation->pre_migrate_fence);
devmem_allocation->pre_migrate_fence = NULL;
/* Upon success bind devmem allocation to range and zdd */
devmem_allocation->timeslice_expiration = get_jiffies_64() +
msecs_to_jiffies(timeslice_ms);

@@ -596,7 +601,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
err = ops->copy_to_ram(pages, pagemap_addr, npages);
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;

@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
err = ops->copy_to_ram(pages, pagemap_addr, npages);
err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;

@@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
* @ops: Pointer to the operations structure for GPU SVM device memory
* @dpagemap: The struct drm_pagemap we're allocating from.
* @size: Size of device memory allocation
* @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
* (May be NULL).
*/
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
struct drm_pagemap *dpagemap, size_t size)
struct drm_pagemap *dpagemap, size_t size,
struct dma_fence *pre_migrate_fence)
{
init_completion(&devmem_allocation->detached);
devmem_allocation->dev = dev;

@@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
devmem_allocation->ops = ops;
devmem_allocation->dpagemap = dpagemap;
devmem_allocation->size = size;
devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
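Note: the drm_pagemap hunks above thread a pre_migrate_fence from drm_pagemap_devmem_init() through the copy_to_devmem()/copy_to_ram() ops, so pending clears or async evictions can be waited on (or pipelined behind) before the copy runs. A hedged sketch of a driver-side copy_to_devmem() implementation that simply waits for the fence; my_hw_copy_pages() and the other my_* names are hypothetical and not part of the patch:

#include <linux/dma-fence.h>
#include <drm/drm_pagemap.h>

/* Hypothetical stand-in for a driver's real hardware copy routine. */
static int my_hw_copy_pages(struct page **pages,
			    struct drm_pagemap_addr *pagemap_addr,
			    unsigned long npages)
{
	return 0;
}

static int my_copy_to_devmem(struct page **pages,
			     struct drm_pagemap_addr *pagemap_addr,
			     unsigned long npages,
			     struct dma_fence *pre_migrate_fence)
{
	/*
	 * A driver that cannot pipeline the copy behind the fence just
	 * waits for it before touching the device memory.
	 */
	if (pre_migrate_fence) {
		long ret = dma_fence_wait(pre_migrate_fence, false);

		if (ret)
			return ret;
	}

	return my_hw_copy_pages(pages, pagemap_addr, npages);
}

static const struct drm_pagemap_devmem_ops my_devmem_ops = {
	.copy_to_devmem = my_copy_to_devmem,
	/* remaining hooks omitted in this sketch */
};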
@@ -951,13 +951,13 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
vma = eb_lookup_vma(eb, eb->exec[i].handle);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
return err;
}
err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err)) {
i915_vma_put(vma);
goto err;
return err;
}
err = eb_add_vma(eb, &current_batch, i, vma);

@@ -966,19 +966,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
if (err) {
if (i + 1 < eb->buffer_count) {
/*
* Execbuffer code expects last vma entry to be NULL,
* since we already initialized this entry,
* set the next value to NULL or we mess up
* cleanup handling.
*/
eb->vma[i + 1].vma = NULL;
}
if (err)
return err;
}
eb->vma[i].flags |= __EXEC_OBJECT_USERPTR_INIT;
eb->args->flags |= __EXEC_USERPTR_USED;

@@ -986,10 +975,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
return 0;
err:
eb->vma[i].vma = NULL;
return err;
}
static int eb_lock_vmas(struct i915_execbuffer *eb)

@@ -3375,7 +3360,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.exec = exec;
eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
eb.vma[0].vma = NULL;
memset(eb.vma, 0, (args->buffer_count + 1) * sizeof(struct eb_vma));
eb.batch_pool = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;

@@ -3584,7 +3570,18 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
if (err)
return err;
/* Allocate extra slots for use by the command parser */
/*
* Allocate extra slots for use by the command parser.
*
* Note that this allocation handles two different arrays (the
* exec2_list array, and the eventual eb.vma array introduced in
* i915_gem_do_execbuffer()), that reside in virtually contiguous
* memory. Also note that the allocation intentionally doesn't fill the
* area with zeros, because the exec2_list part doesn't need to be, as
* it's immediately overwritten by user data a few lines below.
* However, the eb.vma part is explicitly zeroed later in
* i915_gem_do_execbuffer().
*/
exec2_list = kvmalloc_array(count + 2, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
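Note: the i915 hunks above drop the "poke a NULL into the next slot" error handling in eb_lookup_vmas() and instead zero the whole eb.vma array up front in i915_gem_do_execbuffer(), so any early return leaves a NULL-terminated array that the cleanup path can walk. A hedged sketch of that general pattern with hypothetical names (not the i915 code):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

struct slot {
	void *obj;
};

static int fill_slots(struct slot *slots, unsigned int count)
{
	unsigned int i;

	/* One extra, always-NULL slot acts as the terminator for cleanup. */
	memset(slots, 0, (count + 1) * sizeof(*slots));

	for (i = 0; i < count; i++) {
		/* Stand-in for the real lookup (eb_lookup_vma() in i915). */
		slots[i].obj = kzalloc(16, GFP_KERNEL);
		if (!slots[i].obj)
			return -ENOMEM; /* slots[i].obj stays NULL: cleanup stops here */
	}

	return 0;
}

static void release_slots(struct slot *slots)
{
	for (; slots->obj; slots++)
		kfree(slots->obj);
}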
@@ -28,6 +28,16 @@ static void pvr_gem_object_free(struct drm_gem_object *obj)
drm_gem_shmem_object_free(obj);
}
static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
{
struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(obj);
if (pvr_obj->flags & DRM_PVR_BO_PM_FW_PROTECT)
return ERR_PTR(-EPERM);
return drm_gem_prime_export(obj, flags);
}
static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
{
struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);

@@ -42,6 +52,7 @@ static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *v
static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
.free = pvr_gem_object_free,
.print_info = drm_gem_shmem_object_print_info,
.export = pvr_gem_export,
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
@@ -152,8 +152,21 @@ static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_crtc_state *statec = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(statec))
return (void *)statec;
return nv50_head_atom(statec);
}
static inline struct nv50_head_atom *
nv50_head_atom_get_new(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
struct drm_crtc_state *statec = drm_atomic_get_new_crtc_state(state, crtc);
if (!statec)
return NULL;
return nv50_head_atom(statec);
}

@@ -583,7 +583,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.offset[0] = nvbo->offset;
if (wndw->func->prepare) {
asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
asyh = nv50_head_atom_get_new(asyw->state.state, asyw->state.crtc);
if (IS_ERR(asyh))
return PTR_ERR(asyh);
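Note: the nouveau change swaps a state-allocating lookup for a read-only one. drm_atomic_get_crtc_state() may need to lock and allocate CRTC state, which triggers warnings when called from .prepare_fb(), while drm_atomic_get_new_crtc_state() only returns state already contained in the commit (or NULL). A hypothetical plane .prepare_fb() sketch, not the nouveau code:

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

static int my_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	/* Read-only lookup: no locking or allocation at this stage. */
	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	if (!crtc_state)
		return 0;

	/* ... use crtc_state read-only to validate/prepare the framebuffer ... */

	return 0;
}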
@@ -104,7 +104,9 @@ static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
{
g2h_fence->cancel = true;
g2h_fence->fail = true;
g2h_fence->done = true;
/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
WRITE_ONCE(g2h_fence->done, true);
}
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)

@@ -1203,10 +1205,13 @@ retry_same_fence:
return ret;
}
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
/* READ_ONCEs pairs with WRITE_ONCEs in parse_g2h_response
* and g2h_fence_cancel.
*/
ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
if (!ret) {
LNL_FLUSH_WORK(&ct->g2h_worker);
if (g2h_fence.done) {
if (READ_ONCE(g2h_fence.done)) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
ret = 1;

@@ -1454,7 +1459,8 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
g2h_fence->done = true;
/* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
WRITE_ONCE(g2h_fence->done, true);
smp_mb();
wake_up_all(&ct->g2h_fence_wq);
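Note: the xe_guc_ct hunks above wrap the g2h_fence->done flag in WRITE_ONCE()/READ_ONCE() because it is written by the G2H handlers and read locklessly by the waiter; the markings document the pairing and stop the compiler from tearing or caching the accesses. A hedged, generic sketch of the same done-flag pattern with hypothetical names:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct my_request {
	bool done;
};

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

/* Producer side (e.g. a response handler): pairs with READ_ONCE() below. */
static void my_complete(struct my_request *req)
{
	WRITE_ONCE(req->done, true);
	wake_up_all(&my_wq);
}

/* Consumer side: the flag is read without a lock, hence READ_ONCE(). */
static int my_wait(struct my_request *req)
{
	if (!wait_event_timeout(my_wq, READ_ONCE(req->done), HZ))
		return -ETIME;

	return 0;
}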
@@ -2062,6 +2062,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long sram_offset,
struct drm_pagemap_addr *sram_addr,
u64 vram_addr,
struct dma_fence *deps,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;

@@ -2150,6 +2151,14 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
if (deps && !dma_fence_is_signaled(deps)) {
dma_fence_get(deps);
err = drm_sched_job_add_dependency(&job->drm, deps);
if (err)
dma_fence_wait(deps, false);
err = 0;
}
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);

@@ -2175,6 +2184,8 @@ err:
* @npages: Number of pages to migrate.
* @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
* @deps: struct dma_fence representing the dependencies that need
* to be signaled before migration.
*
* Copy from an array dma addresses to a VRAM device physical address
*

@@ -2184,10 +2195,11 @@ err:
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
u64 dst_addr)
u64 dst_addr,
struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
XE_MIGRATE_COPY_TO_VRAM);
deps, XE_MIGRATE_COPY_TO_VRAM);
}
/**

@@ -2196,6 +2208,8 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
* @dst_addr: Array of DMA information (destination of migrate)
* @deps: struct dma_fence representing the dependencies that need
* to be signaled before migration.
*
* Copy from a VRAM device physical address to an array dma addresses
*

@@ -2205,10 +2219,11 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
struct drm_pagemap_addr *dst_addr)
struct drm_pagemap_addr *dst_addr,
struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
deps, XE_MIGRATE_COPY_TO_SRAM);
}
static void xe_migrate_dma_unmap(struct xe_device *xe,

@@ -2384,7 +2399,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
&pagemap_addr[current_page],
vram_addr, write ?
vram_addr, NULL, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {

@@ -116,12 +116,14 @@ int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
u64 dst_addr);
u64 dst_addr,
struct dma_fence *deps);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
struct drm_pagemap_addr *dst_addr);
struct drm_pagemap_addr *dst_addr,
struct dma_fence *deps);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
@@ -476,7 +476,8 @@ static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
static int xe_svm_copy(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
unsigned long npages, const enum xe_svm_copy_dir dir)
unsigned long npages, const enum xe_svm_copy_dir dir,
struct dma_fence *pre_migrate_fence)
{
struct xe_vram_region *vr = NULL;
struct xe_gt *gt = NULL;

@@ -565,7 +566,8 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
&pagemap_addr[pos]);
&pagemap_addr[pos],
pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",

@@ -574,13 +576,14 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
&pagemap_addr[pos],
vram_addr);
vram_addr,
pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}

@@ -603,20 +606,22 @@ static int xe_svm_copy(struct page **pages,
vram_addr, (u64)pagemap_addr[pos].addr, 1);
__fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
&pagemap_addr[pos]);
&pagemap_addr[pos],
pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
(u64)pagemap_addr[pos].addr, vram_addr, 1);
__fence = xe_migrate_to_vram(vr->migrate, 1,
&pagemap_addr[pos],
vram_addr);
vram_addr,
pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}

@@ -629,6 +634,8 @@ err_out:
dma_fence_wait(fence, false);
dma_fence_put(fence);
}
if (pre_migrate_fence)
dma_fence_wait(pre_migrate_fence, false);
/*
* XXX: We can't derive the GT here (or anywhere in this functions, but

@@ -645,16 +652,20 @@ err_out:
static int xe_svm_copy_to_devmem(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
unsigned long npages,
struct dma_fence *pre_migrate_fence)
{
return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
pre_migrate_fence);
}
static int xe_svm_copy_to_ram(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
unsigned long npages,
struct dma_fence *pre_migrate_fence)
{
return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
pre_migrate_fence);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)

@@ -667,6 +678,7 @@ static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
struct xe_bo *bo = to_xe_bo(devmem_allocation);
struct xe_device *xe = xe_bo_device(bo);
dma_fence_put(devmem_allocation->pre_migrate_fence);
xe_bo_put_async(bo);
xe_pm_runtime_put(xe);
}

@@ -861,6 +873,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long timeslice_ms)
{
struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
struct dma_fence *pre_migrate_fence = NULL;
struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
struct drm_buddy_block *block;

@@ -887,8 +900,20 @@
break;
}
/* Ensure that any clearing or async eviction will complete before migration. */
if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
&pre_migrate_fence);
if (err)
dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
else if (pre_migrate_fence)
dma_fence_enable_sw_signaling(pre_migrate_fence);
}
drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
&dpagemap_devmem_ops, dpagemap, end - start);
&dpagemap_devmem_ops, dpagemap, end - start,
pre_migrate_fence);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)

@@ -941,7 +966,7 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
xe_assert(vm->xe, IS_DGFX(vm->xe));
if (xe_svm_range_in_vram(range)) {
drm_info(&vm->xe->drm, "Range is already in VRAM\n");
drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
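Note: the populate_mm hunk above captures outstanding kernel fences on the buffer's reservation object (clears, async evictions) as a single pre_migrate_fence to pipeline the migration copy behind, falling back to a blocking wait if the singleton cannot be built. A hedged sketch of that capture step with a hypothetical helper name (locking of the reservation object follows whatever rules the caller already obeys, as in the patch):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/sched.h>

static struct dma_fence *my_capture_kernel_fence(struct dma_resv *resv)
{
	struct dma_fence *fence = NULL;

	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_KERNEL))
		return NULL; /* nothing pending to wait for */

	if (dma_resv_get_singleton(resv, DMA_RESV_USAGE_KERNEL, &fence)) {
		/* Could not build a singleton fence: wait synchronously instead. */
		dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, false,
				      MAX_SCHEDULE_TIMEOUT);
		return NULL;
	}

	if (fence)
		dma_fence_enable_sw_signaling(fence);

	return fence; /* caller passes this as pre_migrate_fence and puts it later */
}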
@@ -8,6 +8,7 @@
#define NR_PAGES(order) (1U << (order))
struct dma_fence;
struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;

@@ -174,6 +175,8 @@ struct drm_pagemap_devmem_ops {
* @pages: Pointer to array of device memory pages (destination)
* @pagemap_addr: Pointer to array of DMA information (source)
* @npages: Number of pages to copy
* @pre_migrate_fence: dma-fence to wait for before migration start.
* May be NULL.
*
* Copy pages to device memory. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries

@@ -183,13 +186,16 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_devmem)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
unsigned long npages);
unsigned long npages,
struct dma_fence *pre_migrate_fence);
/**
* @copy_to_ram: Copy to system RAM (required for migration)
* @pages: Pointer to array of device memory pages (source)
* @pagemap_addr: Pointer to array of DMA information (destination)
* @npages: Number of pages to copy
* @pre_migrate_fence: dma-fence to wait for before migration start.
* May be NULL.
*
* Copy pages to system RAM. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries

@@ -199,7 +205,8 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_ram)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
unsigned long npages);
unsigned long npages,
struct dma_fence *pre_migrate_fence);
};
/**

@@ -212,6 +219,8 @@ struct drm_pagemap_devmem_ops {
* @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
* @size: Size of device memory allocation
* @timeslice_expiration: Timeslice expiration in jiffies
* @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
* (May be NULL).
*/
struct drm_pagemap_devmem {
struct device *dev;

@@ -221,6 +230,7 @@ struct drm_pagemap_devmem {
struct drm_pagemap *dpagemap;
size_t size;
u64 timeslice_expiration;
struct dma_fence *pre_migrate_fence;
};
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,

@@ -238,7 +248,8 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
struct drm_pagemap *dpagemap, size_t size);
struct drm_pagemap *dpagemap, size_t size,
struct dma_fence *pre_migrate_fence);
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,