Merge tag 'drm-msm-fixes-2025-06-16' of https://gitlab.freedesktop.org/drm/msm into drm-fixes
Fixes for v6.16-rc3

Display:
- Fixed DP output on SDM845
- Fixed 10nm DSI PLL init

GPU:
- SUBMIT ioctl error path leak fixes
- drm half of stall-on-fault fixes. Note there is a soft dependency, to get correct mmu fault devcoredumps, on arm-smmu changes which are not in this branch, but have already been merged by Linus. So by the time Linus merges this, everything should be peachy.
- a7xx: Missing CP_RESET_CONTEXT_STATE
- Skip GPU component bind if GPU is not in the device table

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <rob.clark@oss.qualcomm.com>
Link: https://lore.kernel.org/r/CACSVV03=OH74ip8O1xqb8RJWGyM4HFuUnWuR=p3zJR+-ko_AJA@mail.gmail.com
commit 49a5fdc06c
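The stall-on-fault half of this merge follows one pattern throughout the diff below: the fault handler turns stalling off and arms a cooldown, and the submit path turns it back on once the cooldown has expired and any devcoredump has been collected. The following is a minimal standalone model of that pattern, not the kernel code itself: userspace C with a mutex and CLOCK_MONOTONIC standing in for the kernel's spinlock and ktime, and the set_stall MMU callback reduced to a comment. Names like msm_drm_private_model and fault_disable_stall are illustrative stand-ins.

/*
 * Standalone model of the stall-on-fault cooldown merged here.
 * Build with: cc -o stall stall.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

struct msm_drm_private_model {
	pthread_mutex_t fault_stall_lock; /* mirrors fault_stall_lock */
	int64_t stall_reenable_time;      /* mirrors stall_reenable_time */
	bool stall_enabled;               /* mirrors stall_enabled */
};

/* Fault-handler side: back off for at least half a second. */
static void fault_disable_stall(struct msm_drm_private_model *priv)
{
	pthread_mutex_lock(&priv->fault_stall_lock);
	if (priv->stall_enabled) {
		priv->stall_enabled = false;
		/* kernel: gpu->aspace->mmu->funcs->set_stall(mmu, false); */
	}
	priv->stall_reenable_time = now_ms() + 500;
	pthread_mutex_unlock(&priv->fault_stall_lock);
}

/* Submit side: re-enable once the cooldown has passed. */
static void check_and_reenable_stall(struct msm_drm_private_model *priv)
{
	pthread_mutex_lock(&priv->fault_stall_lock);
	if (!priv->stall_enabled && now_ms() > priv->stall_reenable_time) {
		priv->stall_enabled = true;
		/* kernel: gpu->aspace->mmu->funcs->set_stall(mmu, true); */
	}
	pthread_mutex_unlock(&priv->fault_stall_lock);
}

int main(void)
{
	struct msm_drm_private_model priv = {
		.fault_stall_lock = PTHREAD_MUTEX_INITIALIZER,
		.stall_enabled = true,
	};

	fault_disable_stall(&priv);      /* a pagefault storm begins */
	check_and_reenable_stall(&priv); /* too early: stays disabled */
	printf("enabled=%d\n", priv.stall_enabled);
	return 0;
}

The design point this models: disabling stall in the fault handler itself, rather than resuming translation from a worker, is what lets the driver survive a storm of faults without blocking the SMMU.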
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -71,10 +71,6 @@ static int a2xx_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
 	return 0;
 }
 
-static void a2xx_gpummu_resume_translation(struct msm_mmu *mmu)
-{
-}
-
 static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
 {
 	struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
@@ -90,7 +86,6 @@ static const struct msm_mmu_funcs funcs = {
 	.map = a2xx_gpummu_map,
 	.unmap = a2xx_gpummu_unmap,
 	.destroy = a2xx_gpummu_destroy,
-	.resume_translation = a2xx_gpummu_resume_translation,
 };
 
 struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -131,6 +131,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
+	adreno_check_and_reenable_stall(adreno_gpu);
+
 	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
 		ring->cur_ctx_seqno = 0;
 		a5xx_submit_in_rb(gpu, submit);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -130,6 +130,20 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 		OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
 		OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
 		OUT_RING(ring, submit->seqno - 1);
+
+		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+		OUT_RING(ring, CP_SET_THREAD_BOTH);
+
+		/* Reset state used to synchronize BR and BV */
+		OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
+		OUT_RING(ring,
+			 CP_RESET_CONTEXT_STATE_0_CLEAR_ON_CHIP_TS |
+			 CP_RESET_CONTEXT_STATE_0_CLEAR_RESOURCE_TABLE |
+			 CP_RESET_CONTEXT_STATE_0_CLEAR_BV_BR_COUNTER |
+			 CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
+
+		OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+		OUT_RING(ring, CP_SET_THREAD_BR);
 	}
 
 	if (!sysprof) {
@@ -212,6 +226,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
+	adreno_check_and_reenable_stall(adreno_gpu);
+
 	a6xx_set_pagetable(a6xx_gpu, ring, submit);
 
 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
@@ -335,6 +351,8 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
+	adreno_check_and_reenable_stall(adreno_gpu);
+
 	/*
 	 * Toggle concurrent binning for pagetable switch and set the thread to
 	 * BR since only it can execute the pagetable switch packets.
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -137,9 +137,8 @@ err_disable_rpm:
 	return NULL;
 }
 
-static int find_chipid(struct device *dev, uint32_t *chipid)
+static int find_chipid(struct device_node *node, uint32_t *chipid)
 {
-	struct device_node *node = dev->of_node;
 	const char *compat;
 	int ret;
 
@@ -173,15 +172,36 @@ static int find_chipid(struct device *dev, uint32_t *chipid)
 	/* and if that fails, fall back to legacy "qcom,chipid" property: */
 	ret = of_property_read_u32(node, "qcom,chipid", chipid);
 	if (ret) {
-		DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
+		DRM_ERROR("%pOF: could not parse qcom,chipid: %d\n",
+			  node, ret);
 		return ret;
 	}
 
-	dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+	pr_warn("%pOF: Using legacy qcom,chipid binding!\n", node);
 
 	return 0;
 }
 
+bool adreno_has_gpu(struct device_node *node)
+{
+	const struct adreno_info *info;
+	uint32_t chip_id;
+	int ret;
+
+	ret = find_chipid(node, &chip_id);
+	if (ret)
+		return false;
+
+	info = adreno_info(chip_id);
+	if (!info) {
+		pr_warn("%pOF: Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
+			node, ADRENO_CHIPID_ARGS(chip_id));
+		return false;
+	}
+
+	return true;
+}
+
 static int adreno_bind(struct device *dev, struct device *master, void *data)
 {
 	static struct adreno_platform_config config = {};
@@ -191,19 +211,18 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 	struct msm_gpu *gpu;
 	int ret;
 
-	ret = find_chipid(dev, &config.chip_id);
-	if (ret)
+	ret = find_chipid(dev->of_node, &config.chip_id);
+	/* We shouldn't have gotten this far if we can't parse the chip_id */
+	if (WARN_ON(ret))
 		return ret;
 
 	dev->platform_data = &config;
 	priv->gpu_pdev = to_platform_device(dev);
 
 	info = adreno_info(config.chip_id);
-	if (!info) {
-		dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
-			ADRENO_CHIPID_ARGS(config.chip_id));
+	/* We shouldn't have gotten this far if we don't recognize the GPU: */
+	if (WARN_ON(!info))
 		return -ENXIO;
-	}
 
 	config.info = info;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -259,24 +259,54 @@ u64 adreno_private_address_space_size(struct msm_gpu *gpu)
 	return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
 }
 
+void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	unsigned long flags;
+
+	/*
+	 * Wait until the cooldown period has passed and we would actually
+	 * collect a crashdump to re-enable stall-on-fault.
+	 */
+	spin_lock_irqsave(&priv->fault_stall_lock, flags);
+	if (!priv->stall_enabled &&
+	    ktime_after(ktime_get(), priv->stall_reenable_time) &&
+	    !READ_ONCE(gpu->crashstate)) {
+		priv->stall_enabled = true;
+
+		gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, true);
+	}
+	spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
+}
+
+#define ARM_SMMU_FSR_TF BIT(1)
+#define ARM_SMMU_FSR_PF BIT(3)
+#define ARM_SMMU_FSR_EF BIT(4)
+#define ARM_SMMU_FSR_SS BIT(30)
+
 int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 			 struct adreno_smmu_fault_info *info, const char *block,
 			 u32 scratch[4])
 {
+	struct msm_drm_private *priv = gpu->dev->dev_private;
 	const char *type = "UNKNOWN";
-	bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+	bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
+		!READ_ONCE(gpu->crashstate);
+	unsigned long irq_flags;
 
 	/*
-	 * If we aren't going to be resuming later from fault_worker, then do
-	 * it now.
+	 * In case there is a subsequent storm of pagefaults, disable
+	 * stall-on-fault for at least half a second.
 	 */
-	if (!do_devcoredump) {
-		gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+	spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+	if (priv->stall_enabled) {
+		priv->stall_enabled = false;
+
+		gpu->aspace->mmu->funcs->set_stall(gpu->aspace->mmu, false);
 	}
+	priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
+	spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
 
 	/*
 	 * Print a default message if we couldn't get the data from the
@@ -304,16 +334,18 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 			scratch[0], scratch[1], scratch[2], scratch[3]);
 
 	if (do_devcoredump) {
+		struct msm_gpu_fault_info fault_info = {};
+
 		/* Turn off the hangcheck timer to keep it from bothering us */
 		timer_delete(&gpu->hangcheck_timer);
 
-		gpu->fault_info.ttbr0 = info->ttbr0;
-		gpu->fault_info.iova = iova;
-		gpu->fault_info.flags = flags;
-		gpu->fault_info.type = type;
-		gpu->fault_info.block = block;
+		fault_info.ttbr0 = info->ttbr0;
+		fault_info.iova = iova;
+		fault_info.flags = flags;
+		fault_info.type = type;
+		fault_info.block = block;
 
-		kthread_queue_work(gpu->worker, &gpu->fault_work);
+		msm_gpu_fault_crashstate_capture(gpu, &fault_info);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -636,6 +636,8 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
 			 struct adreno_smmu_fault_info *info, const char *block,
 			 u32 scratch[4]);
 
+void adreno_check_and_reenable_stall(struct adreno_gpu *gpu);
+
 int adreno_read_speedbin(struct device *dev, u32 *speedbin);
 
 /*
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -94,17 +94,21 @@ static void drm_mode_to_intf_timing_params(
 		timing->vsync_polarity = 0;
 	}
 
-	/* for DP/EDP, Shift timings to align it to bottom right */
-	if (phys_enc->hw_intf->cap->type == INTF_DP) {
+	timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+	timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
+
+	/*
+	 * For DP/EDP, Shift timings to align it to bottom right.
+	 * wide_bus_en is set for everything excluding SDM845 &
+	 * porch changes cause DisplayPort failure and HDMI tearing.
+	 */
+	if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
 		timing->h_back_porch += timing->h_front_porch;
 		timing->h_front_porch = 0;
 		timing->v_back_porch += timing->v_front_porch;
 		timing->v_front_porch = 0;
 	}
 
-	timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
-	timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
-
 	/*
 	 * for DP, divide the horizonal parameters by 2 when
 	 * widebus is enabled
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -128,6 +128,11 @@ static const struct msm_dp_desc msm_dp_desc_sa8775p[] = {
 	{}
 };
 
+static const struct msm_dp_desc msm_dp_desc_sdm845[] = {
+	{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+	{}
+};
+
 static const struct msm_dp_desc msm_dp_desc_sc7180[] = {
 	{ .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
 	{}
@@ -180,7 +185,7 @@ static const struct of_device_id msm_dp_dt_match[] = {
 	{ .compatible = "qcom,sc8180x-edp", .data = &msm_dp_desc_sc8180x },
 	{ .compatible = "qcom,sc8280xp-dp", .data = &msm_dp_desc_sc8280xp },
 	{ .compatible = "qcom,sc8280xp-edp", .data = &msm_dp_desc_sc8280xp },
-	{ .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sc7180 },
+	{ .compatible = "qcom,sdm845-dp", .data = &msm_dp_desc_sdm845 },
 	{ .compatible = "qcom,sm8350-dp", .data = &msm_dp_desc_sc7180 },
 	{ .compatible = "qcom,sm8650-dp", .data = &msm_dp_desc_sm8650 },
 	{ .compatible = "qcom,x1e80100-dp", .data = &msm_dp_desc_x1e80100 },
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -704,6 +704,13 @@ static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
 	/* TODO: Remove this when we have proper display handover support */
 	msm_dsi_phy_pll_save_state(phy);
 
+	/*
+	 * Store also proper vco_current_rate, because its value will be used in
+	 * dsi_10nm_pll_restore_state().
+	 */
+	if (!dsi_pll_10nm_vco_recalc_rate(&pll_10nm->clk_hw, VCO_REF_CLK_RATE))
+		pll_10nm->vco_current_rate = pll_10nm->phy->cfg->min_pll_rate;
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -208,6 +208,35 @@ DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
 			 shrink_get, shrink_set,
 			 "0x%08llx\n");
 
+/*
+ * Return the number of microseconds to wait until stall-on-fault is
+ * re-enabled. If 0 then it is already enabled or will be re-enabled on the
+ * next submit (unless there's a leftover devcoredump). This is useful for
+ * kernel tests that intentionally produce a fault and check the devcoredump to
+ * wait until the cooldown period is over.
+ */
+
+static int
+stall_reenable_time_get(void *data, u64 *val)
+{
+	struct msm_drm_private *priv = data;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+
+	if (priv->stall_enabled)
+		*val = 0;
+	else
+		*val = max(ktime_us_delta(priv->stall_reenable_time, ktime_get()), 0);
+
+	spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(stall_reenable_time_fops,
+			 stall_reenable_time_get, NULL,
+			 "%lld\n");
+
 static int msm_gem_show(struct seq_file *m, void *arg)
 {
@@ -319,6 +348,9 @@ static void msm_debugfs_gpu_init(struct drm_minor *minor)
 	debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
 		&priv->disable_err_irq);
 
+	debugfs_create_file("stall_reenable_time_us", 0400, minor->debugfs_root,
+			    priv, &stall_reenable_time_fops);
+
 	gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
 
 	debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
@@ -245,6 +245,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
 	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
 
+	/* Initialize stall-on-fault */
+	spin_lock_init(&priv->fault_stall_lock);
+	priv->stall_enabled = true;
+
 	/* Teach lockdep about lock ordering wrt. shrinker: */
 	fs_reclaim_acquire(GFP_KERNEL);
 	might_lock(&priv->lru.lock);
@@ -926,7 +930,7 @@ static const struct drm_driver msm_driver = {
  * is no external component that we need to add since LVDS is within MDP4
  * itself.
  */
-static int add_components_mdp(struct device *master_dev,
+static int add_mdp_components(struct device *master_dev,
 			      struct component_match **matchptr)
 {
 	struct device_node *np = master_dev->of_node;
@@ -1030,7 +1034,7 @@ static int add_gpu_components(struct device *dev,
 	if (!np)
 		return 0;
 
-	if (of_device_is_available(np))
+	if (of_device_is_available(np) && adreno_has_gpu(np))
 		drm_of_component_match_add(dev, matchptr, component_compare_of, np);
 
 	of_node_put(np);
@@ -1071,7 +1075,7 @@ int msm_drv_probe(struct device *master_dev,
 
 	/* Add mdp components if we have KMS. */
 	if (kms_init) {
-		ret = add_components_mdp(master_dev, &match);
+		ret = add_mdp_components(master_dev, &match);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
@@ -222,6 +222,29 @@ struct msm_drm_private {
 	 * the sw hangcheck mechanism.
	 */
 	bool disable_err_irq;
+
+	/**
+	 * @fault_stall_lock:
+	 *
+	 * Serialize changes to stall-on-fault state.
+	 */
+	spinlock_t fault_stall_lock;
+
+	/**
+	 * @fault_stall_reenable_time:
+	 *
+	 * If stall_enabled is false, when to reenable stall-on-fault.
+	 * Protected by @fault_stall_lock.
+	 */
+	ktime_t stall_reenable_time;
+
+	/**
+	 * @stall_enabled:
+	 *
+	 * Whether stall-on-fault is currently enabled. Protected by
+	 * @fault_stall_lock.
+	 */
+	bool stall_enabled;
 };
 
 const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref)
 			container_of(kref, struct msm_gem_submit, ref);
 	unsigned i;
 
+	/*
+	 * In error paths, we could unref the submit without calling
+	 * drm_sched_entity_push_job(), so msm_job_free() will never
+	 * get called. Since drm_sched_job_cleanup() will NULL out
+	 * s_fence, we can use that to detect this case.
+	 */
+	if (submit->base.s_fence)
+		drm_sched_job_cleanup(&submit->base);
+
 	if (submit->fence_id) {
 		spin_lock(&submit->queue->idr_lock);
 		idr_remove(&submit->queue->fence_idr, submit->fence_id);
@@ -649,6 +658,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_ringbuffer *ring;
 	struct msm_submit_post_dep *post_deps = NULL;
 	struct drm_syncobj **syncobjs_to_reset = NULL;
+	struct sync_file *sync_file = NULL;
 	int out_fence_fd = -1;
 	unsigned i;
 	int ret;
@@ -858,7 +868,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	}
 
 	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
-		struct sync_file *sync_file = sync_file_create(submit->user_fence);
+		sync_file = sync_file_create(submit->user_fence);
 		if (!sync_file) {
 			ret = -ENOMEM;
 		} else {
@@ -892,8 +902,11 @@ out:
 out_unlock:
 	mutex_unlock(&queue->lock);
 out_post_unlock:
-	if (ret && (out_fence_fd >= 0))
+	if (ret && (out_fence_fd >= 0)) {
 		put_unused_fd(out_fence_fd);
+		if (sync_file)
+			fput(sync_file->file);
+	}
 
 	if (!IS_ERR_OR_NULL(submit)) {
 		msm_gem_submit_put(submit);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
@@ -257,7 +257,8 @@ out:
 }
 
 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
-		struct msm_gem_submit *submit, char *comm, char *cmd)
+		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+		char *comm, char *cmd)
 {
 	struct msm_gpu_state *state;
 
@@ -276,7 +277,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 	/* Fill in the additional crash state information */
 	state->comm = kstrdup(comm, GFP_KERNEL);
 	state->cmd = kstrdup(cmd, GFP_KERNEL);
-	state->fault_info = gpu->fault_info;
+	if (fault_info)
+		state->fault_info = *fault_info;
 
 	if (submit) {
 		int i;
@@ -308,7 +310,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 }
 #else
 static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
-		struct msm_gem_submit *submit, char *comm, char *cmd)
+		struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info,
+		char *comm, char *cmd)
 {
 }
 #endif
@@ -405,7 +408,7 @@ static void recover_worker(struct kthread_work *work)
 
 	/* Record the crash state */
 	pm_runtime_get_sync(&gpu->pdev->dev);
-	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+	msm_gpu_crashstate_capture(gpu, submit, NULL, comm, cmd);
 
 	kfree(cmd);
 	kfree(comm);
@@ -459,9 +462,8 @@ out_unlock:
 	msm_gpu_retire(gpu);
 }
 
-static void fault_worker(struct kthread_work *work)
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info)
 {
-	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
 	struct msm_gem_submit *submit;
 	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
 	char *comm = NULL, *cmd = NULL;
@@ -484,16 +486,13 @@ static void fault_worker(struct kthread_work *work)
 
 	/* Record the crash state */
 	pm_runtime_get_sync(&gpu->pdev->dev);
-	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+	msm_gpu_crashstate_capture(gpu, submit, fault_info, comm, cmd);
 	pm_runtime_put_sync(&gpu->pdev->dev);
 
 	kfree(cmd);
 	kfree(comm);
 
 resume_smmu:
-	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
-	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
-
 	mutex_unlock(&gpu->lock);
 }
 
@@ -882,7 +881,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	init_waitqueue_head(&gpu->retire_event);
 	kthread_init_work(&gpu->retire_work, retire_worker);
 	kthread_init_work(&gpu->recover_work, recover_worker);
-	kthread_init_work(&gpu->fault_work, fault_worker);
 
 	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
@@ -253,12 +253,6 @@ struct msm_gpu {
 #define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
 	struct timer_list hangcheck_timer;
 
-	/* Fault info for most recent iova fault: */
-	struct msm_gpu_fault_info fault_info;
-
-	/* work for handling GPU ioval faults: */
-	struct kthread_work fault_work;
-
 	/* work for handling GPU recovery: */
 	struct kthread_work recover_work;
 
@@ -668,6 +662,7 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *ta
 void msm_gpu_cleanup(struct msm_gpu *gpu);
 
 struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
+bool adreno_has_gpu(struct device_node *node);
 void __init adreno_register(void);
 void __exit adreno_unregister(void);
 
@@ -705,6 +700,8 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
 	mutex_unlock(&gpu->lock);
 }
 
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info);
+
 /*
  * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
  * support expanded privileges
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
@@ -345,7 +345,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
 		unsigned long iova, int flags, void *arg)
 {
 	struct msm_iommu *iommu = arg;
-	struct msm_mmu *mmu = &iommu->base;
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
 	struct adreno_smmu_fault_info info, *ptr = NULL;
 
@@ -359,9 +358,6 @@ static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev
 
 	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
 
-	if (mmu->funcs->resume_translation)
-		mmu->funcs->resume_translation(mmu);
-
 	return 0;
 }
 
@@ -376,12 +372,12 @@ static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *de
 	return -ENOSYS;
 }
 
-static void msm_iommu_resume_translation(struct msm_mmu *mmu)
+static void msm_iommu_set_stall(struct msm_mmu *mmu, bool enable)
 {
 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
 
-	if (adreno_smmu->resume_translation)
-		adreno_smmu->resume_translation(adreno_smmu->cookie, true);
+	if (adreno_smmu->set_stall)
+		adreno_smmu->set_stall(adreno_smmu->cookie, enable);
 }
 
 static void msm_iommu_detach(struct msm_mmu *mmu)
@@ -431,7 +427,7 @@ static const struct msm_mmu_funcs funcs = {
 		.map = msm_iommu_map,
 		.unmap = msm_iommu_unmap,
 		.destroy = msm_iommu_destroy,
-		.resume_translation = msm_iommu_resume_translation,
+		.set_stall = msm_iommu_set_stall,
 };
 
 struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
@@ -15,7 +15,7 @@ struct msm_mmu_funcs {
 			size_t len, int prot);
 	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
 	void (*destroy)(struct msm_mmu *mmu);
-	void (*resume_translation)(struct msm_mmu *mmu);
+	void (*set_stall)(struct msm_mmu *mmu, bool enable);
 };
 
 enum msm_mmu_type {
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -2255,7 +2255,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
 	<reg32 offset="0" name="0">
 		<bitfield name="CLEAR_ON_CHIP_TS" pos="0" type="boolean"/>
 		<bitfield name="CLEAR_RESOURCE_TABLE" pos="1" type="boolean"/>
-		<bitfield name="CLEAR_GLOBAL_LOCAL_TS" pos="2" type="boolean"/>
+		<bitfield name="CLEAR_BV_BR_COUNTER" pos="2" type="boolean"/>
+		<bitfield name="RESET_GLOBAL_LOCAL_TS" pos="3" type="boolean"/>
 	</reg32>
 </domain>
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -11,6 +11,7 @@ import collections
 import argparse
 import time
 import datetime
+import re
 
 class Error(Exception):
 	def __init__(self, message):
@@ -877,13 +878,14 @@ The rules-ng-ng source files this header was generated from are:
 """)
 	maxlen = 0
 	for filepath in p.xml_files:
-		maxlen = max(maxlen, len(filepath))
+		new_filepath = re.sub("^.+drivers","drivers",filepath)
+		maxlen = max(maxlen, len(new_filepath))
 	for filepath in p.xml_files:
-		pad = " " * (maxlen - len(filepath))
+		pad = " " * (maxlen - len(new_filepath))
 		filesize = str(os.path.getsize(filepath))
 		filesize = " " * (7 - len(filesize)) + filesize
 		filetime = time.ctime(os.path.getmtime(filepath))
-		print("- " + filepath + pad + " (" + filesize + " bytes, from " + filetime + ")")
+		print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)")
 	if p.copyright_year:
 		current_year = str(datetime.date.today().year)
 		print()
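As the comment in the msm_debugfs.c hunk above notes, the new stall_reenable_time_us file is meant for tests that intentionally trigger a fault and then need to wait out the cooldown before the next one. A hypothetical userspace poll loop might look like the sketch below; the debugfs path assumes the usual mount point and that the msm GPU is DRM minor 0, which varies by system.

/* Hypothetical test helper: wait until stall-on-fault is re-armed. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; depends on debugfs mount and DRM minor number. */
	const char *path = "/sys/kernel/debug/dri/0/stall_reenable_time_us";
	long long us;

	for (;;) {
		FILE *f = fopen(path, "r");
		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%lld", &us) != 1) {
			fclose(f);
			fprintf(stderr, "unexpected format in %s\n", path);
			return 1;
		}
		fclose(f);
		if (us == 0)
			break;	/* cooldown over; next submit re-enables stall */
		usleep(us);	/* sleep out the remaining cooldown */
	}
	printf("stall-on-fault ready\n");
	return 0;
}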