Merge tag 'drm-next-2023-11-10' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Daniel Vetter:
 "Dave's VPN to the big machine died, so it's on me to do fixes pr this
  and next week while everyone else is at plumbers.

   - big pile of amd fixes, but mostly for hw support newly added in 6.7

   - i915 fixes, mostly minor things

   - qxl memory leak fix

   - vc4 uaf fix in mock helpers

   - syncobj fix for DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE"

* tag 'drm-next-2023-11-10' of git://anongit.freedesktop.org/drm/drm: (78 commits)
  drm/amdgpu: fix error handling in amdgpu_vm_init
  drm/amdgpu: Fix possible null pointer dereference
  drm/amdgpu: move UVD and VCE sched entity init after sched init
  drm/amdgpu: move kfd_resume before the ip late init
  drm/amd: Explicitly check for GFXOFF to be enabled for s0ix
  drm/amdgpu: Change WREG32_RLC to WREG32_SOC15_RLC where inst != 0 (v2)
  drm/amdgpu: Use correct KIQ MEC engine for gfx9.4.3 (v5)
  drm/amdgpu: add smu v13.0.6 pcs xgmi ras error query support
  drm/amdgpu: fix software pci_unplug on some chips
  drm/amd/display: remove duplicated argument
  drm/amdgpu: correct mca debugfs dump reg list
  drm/amdgpu: correct acclerator check architecutre dump
  drm/amdgpu: add pcs xgmi v6.4.0 ras support
  drm/amdgpu: Change extended-scope MTYPE on GC 9.4.3
  drm/amdgpu: disable smu v13.0.6 mca debug mode by default
  drm/amdgpu: Support multiple error query modes
  drm/amdgpu: refine smu v13.0.6 mca dump driver
  drm/amdgpu: Do not program PF-only regs in hdp_v4_0.c under SRIOV (v2)
  drm/amdgpu: Skip PCTL0_MMHUB_DEEPSLEEP_IB write in jpegv4.0.3 under SRIOV
  drm: amd: Resolve Sphinx unexpected indentation warning
  ...
torvalds committed Nov 10, 2023
2 parents ac347a0 + 03df0fc commit c0d12d7
Showing 97 changed files with 1,760 additions and 812 deletions.
13 changes: 11 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1159,11 +1159,18 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t acc_flags);
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
u64 reg_addr);
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t acc_flags,
uint32_t xcc_id);
void amdgpu_device_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t v,
uint32_t acc_flags);
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
u64 reg_addr, u32 reg_data);
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t v,
uint32_t acc_flags,
uint32_t xcc_id);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
uint32_t reg, uint32_t v, uint32_t xcc_id);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1204,8 +1211,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
@@ -1215,6 +1222,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
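
The new RREG32_XCC()/WREG32_XCC() macros thread an XCC (accelerated compute core) instance id down to amdgpu_device_xcc_rreg()/amdgpu_device_xcc_wreg(), while the KIQ macros now pass instance 0 explicitly. A minimal standalone sketch of that shape follows; every name in it is invented for illustration and none of it is kernel code.

/* Toy model: one register bank per XCC instance; plain accessors default
 * to instance 0, the *_XCC forms take an explicit instance id. */
#include <stdint.h>
#include <stdio.h>

struct toy_dev {
	uint32_t regs[8][256];	/* regs[xcc_id][dword offset] */
};

static uint32_t toy_xcc_rreg(struct toy_dev *d, uint32_t reg, uint32_t xcc_id)
{
	return d->regs[xcc_id][reg];
}

static void toy_xcc_wreg(struct toy_dev *d, uint32_t reg, uint32_t v,
			 uint32_t xcc_id)
{
	d->regs[xcc_id][reg] = v;
}

#define TOY_RREG32(d, reg)		toy_xcc_rreg((d), (reg), 0)
#define TOY_WREG32(d, reg, v)		toy_xcc_wreg((d), (reg), (v), 0)
#define TOY_RREG32_XCC(d, reg, i)	toy_xcc_rreg((d), (reg), (i))
#define TOY_WREG32_XCC(d, reg, v, i)	toy_xcc_wreg((d), (reg), (v), (i))

int main(void)
{
	struct toy_dev dev = { 0 };

	TOY_WREG32_XCC(&dev, 0x10, 0xcafe, 2);	/* targets XCC 2 only */
	printf("xcc2: 0x%x\n", (unsigned)TOY_RREG32_XCC(&dev, 0x10, 2));	/* 0xcafe */
	printf("xcc0: 0x%x\n", (unsigned)TOY_RREG32(&dev, 0x10));		/* 0x0 */
	return 0;
}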
3 changes: 3 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1494,6 +1494,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
if (adev->asic_type < CHIP_RAVEN)
return false;

if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return false;

/*
* If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
* risky to do any special firmware-related preparations for entering
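
The added check ties s0ix eligibility to GFXOFF being enabled in the PowerPlay feature mask, on the reasoning that s0ix entry relies on GFXOFF being available. A simplified gating sketch in the same spirit follows; the names and bit positions are made up and this is not the kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PP_GFXOFF_MASK	(1u << 5)	/* made-up bit position */
#define TOY_FADT_LOW_POWER_S0	(1u << 0)	/* made-up bit position */

struct toy_s0ix_ctx {
	int asic_generation;
	uint32_t pp_feature;
	uint32_t fadt_flags;
};

/* s0ix is only reported as usable if every prerequisite gate passes. */
static bool toy_s0ix_usable(const struct toy_s0ix_ctx *c)
{
	if (c->asic_generation < 9)			/* ASIC too old */
		return false;
	if (!(c->pp_feature & TOY_PP_GFXOFF_MASK))	/* the new gate: GFXOFF disabled */
		return false;
	if (!(c->fadt_flags & TOY_FADT_LOW_POWER_S0))	/* platform not s0ix-capable */
		return false;
	return true;
}

int main(void)
{
	struct toy_s0ix_ctx c = { .asic_generation = 11,
				  .pp_feature = TOY_PP_GFXOFF_MASK,
				  .fadt_flags = TOY_FADT_LOW_POWER_S0 };

	printf("s0ix usable: %d\n", toy_s0ix_usable(&c));	/* prints 1 */
	return 0;
}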
40 changes: 18 additions & 22 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -300,14 +300,13 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
hqd_end = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_AQL_DISPATCH_ID_HI);

for (reg = hqd_base; reg <= hqd_end; reg++)
WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);


/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL),
data);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL, data);

if (wptr) {
/* Don't read wptr with get_user because the user
@@ -336,27 +335,24 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO),
lower_32_bits(guessed_wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI),
upper_32_bits(guessed_wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR),
lower_32_bits((uintptr_t)wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO,
lower_32_bits(guessed_wptr));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI,
upper_32_bits(guessed_wptr));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR,
lower_32_bits((uintptr_t)wptr));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
upper_32_bits((uintptr_t)wptr));
WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1),
(uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id,
queue_id));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
}

/* Start the EOP fetcher */
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR),
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR,
REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE), data);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE, data);

kgd_gfx_v9_release_queue(adev, inst);

@@ -494,15 +490,15 @@ static uint32_t kgd_gfx_v9_4_3_set_address_watch(
VALID,
1);

WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
regTCP_WATCH0_ADDR_H) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_high);
watch_address_high, inst);

WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
regTCP_WATCH0_ADDR_L) +
(watch_id * TCP_WATCH_STRIDE)),
watch_address_low);
watch_address_low, inst);

return watch_address_cntl;
}
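
The pattern in this file replaces WREG32_RLC(SOC15_REG_OFFSET(...)) with WREG32_SOC15_RLC()/WREG32_XCC() so the XCC instance id reaches the mediated register path instead of being dropped once the flat offset is computed. Below is a toy model of why that matters on a multi-instance part; it is illustration only, with invented names, and it assumes (as the shortlog entry "Change WREG32_RLC to WREG32_SOC15_RLC where inst != 0" suggests) that the instance-less wrapper can only target a default instance on its mediated path.

/* Toy model (not kernel code): each instance owns a mediation mailbox.
 * The old-style wrapper has no instance id, so its mediated path always
 * uses mailbox 0; the new-style wrapper carries the instance down and
 * picks the matching mailbox. */
#include <stdint.h>
#include <stdio.h>

#define TOY_NUM_INST 4

static uint32_t mailbox[TOY_NUM_INST];	/* one mediation mailbox per instance */

static void toy_wreg_rlc(uint32_t flat_offset, uint32_t v)
{
	(void)flat_offset;
	mailbox[0] = v;			/* instance id already lost: always inst 0 */
}

static void toy_wreg_soc15_rlc(uint32_t inst, uint32_t reg, uint32_t v)
{
	(void)reg;
	mailbox[inst] = v;		/* instance-aware: lands on the right XCC */
}

int main(void)
{
	toy_wreg_rlc(0x2042, 1);	/* meant for inst 2, ends up on inst 0 */
	toy_wreg_soc15_rlc(2, 0x42, 1);	/* correctly targets inst 2 */
	printf("mailbox[0]=%u mailbox[2]=%u\n",
	       (unsigned)mailbox[0], (unsigned)mailbox[2]);
	return 0;
}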
42 changes: 20 additions & 22 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -91,8 +91,8 @@ void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
{
kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);

WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_BASES, sh_mem_bases);
/* APE1 no longer exists on GFX9 */

kgd_gfx_v9_unlock_srbm(adev, inst);
@@ -239,14 +239,13 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,

for (reg = hqd_base;
reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);


/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL),
data);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL, data);

if (wptr) {
/* Don't read wptr with get_user because the user
@@ -275,25 +274,24 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO),
lower_32_bits(guessed_wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI),
upper_32_bits(guessed_wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR),
lower_32_bits((uintptr_t)wptr));
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
upper_32_bits((uintptr_t)wptr));
WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO,
lower_32_bits(guessed_wptr));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI,
upper_32_bits(guessed_wptr));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR,
lower_32_bits((uintptr_t)wptr));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
upper_32_bits((uintptr_t)wptr));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
(uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
}

/* Start the EOP fetcher */
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR),
REG_SET_FIELD(m->cp_hqd_eop_rptr,
CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR,
REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE, data);

kgd_gfx_v9_release_queue(adev, inst);

@@ -556,7 +554,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
break;
}

WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST, type);

end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
@@ -908,8 +906,8 @@ void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
uint32_t inst)

{
*wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
mmCP_IQ_WAIT_TIME2));
*wait_times = RREG32_SOC15_RLC(GC, GET_INST(GC, inst),
mmCP_IQ_WAIT_TIME2);
}

void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
1 change: 1 addition & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -172,6 +172,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
}

rcu_read_unlock();
*result = NULL;
return -ENOENT;
}
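
The added line clears the caller-provided output pointer on the -ENOENT path, so a caller that looks at *result after a failed lookup sees NULL rather than whatever the variable held before the call. A small standalone sketch of the same pattern, with illustrative names only:

#include <errno.h>
#include <stddef.h>

struct toy_item { int id; };

static struct toy_item toy_table[4];

static int toy_lookup(int id, struct toy_item **result)
{
	for (size_t i = 0; i < 4; i++) {
		if (toy_table[i].id == id) {
			*result = &toy_table[i];
			return 0;
		}
	}
	*result = NULL;		/* never leave *result unset on the error path */
	return -ENOENT;
}

int main(void)
{
	struct toy_item dummy = { 0 };
	struct toy_item *it = &dummy;		/* stale value before the call */
	int r = toy_lookup(42, &it);

	/* r == -ENOENT and it == NULL: the stale pointer is not visible */
	return (r == -ENOENT && it == NULL) ? 0 : 1;
}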

2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1415,7 +1415,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r == -ENOMEM)
DRM_ERROR("Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
DRM_ERROR("Failed to process the buffer list %d!\n", r);
DRM_DEBUG("Failed to process the buffer list %d!\n", r);
goto error_fini;
}

114 changes: 105 additions & 9 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
#include "amdgpu_virt.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
@@ -472,7 +473,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_domain->sem)) {
ret = amdgpu_kiq_rreg(adev, reg);
ret = amdgpu_kiq_rreg(adev, reg, 0);
up_read(&adev->reset_domain->sem);
} else {
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -509,6 +510,49 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
BUG();
}


/**
* amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
* @acc_flags: access flags which require special behavior
* @xcc_id: xcc accelerated compute core id
*
* Returns the 32 bit value from the offset specified.
*/
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t acc_flags,
uint32_t xcc_id)
{
uint32_t ret, rlcg_flag;

if (amdgpu_device_skip_hw_access(adev))
return 0;

if ((reg * 4) < adev->rmmio_size) {
if (amdgpu_sriov_vf(adev) &&
!amdgpu_sriov_runtime(adev) &&
adev->gfx.rlc.rlcg_reg_access_supported &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
GC_HWIP, false,
&rlcg_flag)) {
ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_domain->sem)) {
ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
up_read(&adev->reset_domain->sem);
} else {
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
}
} else {
ret = adev->pcie_rreg(adev, reg * 4);
}

return ret;
}

/*
* MMIO register write with bytes helper functions
* @offset:bytes offset from MMIO start
@@ -556,7 +600,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_kiq_wreg(adev, reg, v);
amdgpu_kiq_wreg(adev, reg, v, 0);
up_read(&adev->reset_domain->sem);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -597,6 +641,47 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
}
}

/**
* amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
* @v: 32 bit value to write to the register
* @acc_flags: access flags which require special behavior
* @xcc_id: xcc accelerated compute core id
*
* Writes the value specified to the offset specified.
*/
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t v,
uint32_t acc_flags, uint32_t xcc_id)
{
uint32_t rlcg_flag;

if (amdgpu_device_skip_hw_access(adev))
return;

if ((reg * 4) < adev->rmmio_size) {
if (amdgpu_sriov_vf(adev) &&
!amdgpu_sriov_runtime(adev) &&
adev->gfx.rlc.rlcg_reg_access_supported &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
GC_HWIP, true,
&rlcg_flag)) {
amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
amdgpu_sriov_runtime(adev) &&
down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_kiq_wreg(adev, reg, v, xcc_id);
up_read(&adev->reset_domain->sem);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
} else {
adev->pcie_wreg(adev, reg * 4, v);
}
}

/**
* amdgpu_device_indirect_rreg - read an indirect register
*
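
Both new helpers, amdgpu_device_xcc_rreg() and amdgpu_device_xcc_wreg(), pick one of four access paths: indirect PCIe access for offsets outside the MMIO BAR, the RLCG mediated path for an SR-IOV VF outside runtime, the KIQ ring during SR-IOV runtime (now carrying the xcc_id), and plain readl()/writel() otherwise. The standalone sketch below models only that decision tree; it omits details such as amdgpu_virt_get_rlcg_reg_access_flag() and the reset-domain trylock, and all names in it are invented.

#include <stdbool.h>
#include <stdio.h>

enum toy_path { TOY_PCIE_INDIRECT, TOY_RLCG, TOY_KIQ, TOY_DIRECT_MMIO };

struct toy_ctx {
	bool sriov_vf;		/* running as an SR-IOV virtual function */
	bool sriov_runtime;	/* VF is in runtime: no free direct access */
	bool rlcg_supported;	/* RLCG mediated register access available */
	bool no_kiq;		/* caller asked to bypass KIQ */
	unsigned long rmmio_size;
};

static enum toy_path toy_pick_path(const struct toy_ctx *c, unsigned long byte_off)
{
	if (byte_off >= c->rmmio_size)
		return TOY_PCIE_INDIRECT;	/* register lives outside the MMIO BAR */
	if (c->sriov_vf && !c->sriov_runtime && c->rlcg_supported)
		return TOY_RLCG;		/* mediated access via RLC, per-XCC */
	if (!c->no_kiq && c->sriov_runtime)
		return TOY_KIQ;			/* queue the access on the KIQ ring, per-XCC */
	return TOY_DIRECT_MMIO;			/* plain readl()/writel() */
}

int main(void)
{
	struct toy_ctx c = { .sriov_vf = true, .sriov_runtime = true,
			     .rlcg_supported = true, .rmmio_size = 0x80000 };

	printf("path=%d\n", toy_pick_path(&c, 0x1000));	/* prints 2 (TOY_KIQ) */
	return 0;
}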
@@ -2499,6 +2584,18 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
ring->name);
return r;
}
r = amdgpu_uvd_entity_init(adev, ring);
if (r) {
DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
ring->name);
return r;
}
r = amdgpu_vce_entity_init(adev, ring);
if (r) {
DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
ring->name);
return r;
}
}

amdgpu_xcp_update_partition_sched_list(adev);
@@ -4486,19 +4583,18 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
amdgpu_fence_driver_hw_init(adev);

r = amdgpu_device_ip_late_init(adev);
if (r)
goto exit;

queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));

if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
goto exit;
}

r = amdgpu_device_ip_late_init(adev);
if (r)
goto exit;

queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
exit:
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_data_exchange(adev);
