mariux64/linux · drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c (commit c51aa79)
1434 lines (1183 loc) · 38.5 KB
/*
 * Copyright 2016-2024 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN			"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO		"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2			"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS		"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR			"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE		"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10			"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14			"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12			"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH		"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN		"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY		"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP		"amdgpu/yellow_carp_vcn.bin"
#define FIRMWARE_VCN_3_1_2		"amdgpu/vcn_3_1_2.bin"
#define FIRMWARE_VCN4_0_0		"amdgpu/vcn_4_0_0.bin"
#define FIRMWARE_VCN4_0_2		"amdgpu/vcn_4_0_2.bin"
#define FIRMWARE_VCN4_0_3		"amdgpu/vcn_4_0_3.bin"
#define FIRMWARE_VCN4_0_4		"amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5		"amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6		"amdgpu/vcn_4_0_6.bin"
#define FIRMWARE_VCN4_0_6_1		"amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0		"amdgpu/vcn_5_0_0.bin"
#define FIRMWARE_VCN5_0_1		"amdgpu/vcn_5_0_1.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
	char ucode_prefix[25];
	int r;

	adev->vcn.inst[i].adev = adev;
	adev->vcn.inst[i].inst = i;
	amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));

	if (i != 0 && adev->vcn.per_inst_fw) {
		r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
					 AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_%d.bin", ucode_prefix, i);
		if (r)
			amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		if (!adev->vcn.inst[0].fw) {
			r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
						 AMDGPU_UCODE_REQUIRED,
						 "amdgpu/%s.bin", ucode_prefix);
			if (r)
				amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		} else {
			r = 0;
		}
		adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
	}

	return r;
}

int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
{
	unsigned long bo_size;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	unsigned int fw_shared_size, log_offset;
	int r;

	mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
	mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
	atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
	INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work,
			  amdgpu_vcn_idle_work_handler);
	atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
		adev->vcn.inst[i].indirect_sram = true;

	/*
	 * Some Steam Deck's BIOS versions are incompatible with the
	 * indirect SRAM mode, leading to amdgpu being unable to get
	 * properly probed (and even potentially crashing the kernel).
	 * Hence, check for these versions here - notice this is
	 * restricted to Vangogh (Deck's APU).
	 */
	if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);

		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
				 !strncmp("F7A0114", bios_ver, 7))) {
			adev->vcn.inst[i].indirect_sram = false;
			dev_info(adev->dev,
				 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n",
				 bios_ver);
		}
	}

	/* from vcn4 and above, only unified queue is used */
	adev->vcn.inst[i].using_unified_queue =
		amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);

	hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
	adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
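	 *
	 * Field layout of ucode_version under the new convention, as decoded
	 * below: bits 28-31 VEP, bits 24-27 decode version, bits 20-23 encode
	 * major, bits 12-19 encode minor, bits 0-11 firmware revision.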
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
	} else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
		log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
	} else {
		fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
	}

	bo_size += fw_shared_size;

	if (amdgpu_vcnfw_log)
		bo_size += AMDGPU_VCNFW_LOG_SIZE;

	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vcn.inst[i].vcpu_bo,
				    &adev->vcn.inst[i].gpu_addr,
				    &adev->vcn.inst[i].cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
		bo_size - fw_shared_size;
	adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
		bo_size - fw_shared_size;

	adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;

	if (amdgpu_vcnfw_log) {
		adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
		adev->vcn.inst[i].fw_shared.log_offset = log_offset;
	}

	if (adev->vcn.inst[i].indirect_sram) {
		r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &adev->vcn.inst[i].dpg_sram_bo,
					    &adev->vcn.inst[i].dpg_sram_gpu_addr,
					    &adev->vcn.inst[i].dpg_sram_cpu_addr);
		if (r) {
			dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
			return r;
		}
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
	int j;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	amdgpu_bo_free_kernel(&adev->vcn.inst[i].dpg_sram_bo,
			      &adev->vcn.inst[i].dpg_sram_gpu_addr,
			      (void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);

	kvfree(adev->vcn.inst[i].saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
			      &adev->vcn.inst[i].gpu_addr,
			      (void **)&adev->vcn.inst[i].cpu_addr);

	amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);

	if (adev->vcn.per_inst_fw) {
		amdgpu_ucode_release(&adev->vcn.inst[i].fw);
	} else {
		amdgpu_ucode_release(&adev->vcn.inst[0].fw);
		adev->vcn.inst[i].fw = NULL;
	}
	mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
	mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);

	return 0;
}

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev,
				enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;

	if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
		ret = true;
	else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
		ret = true;

	return ret;
}

static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.inst[i].saved_bo)
		return -ENOMEM;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
		drm_dev_exit(idx);
	}

	return 0;
}

int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
{
	int ret, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
{
	bool in_ras_intr = amdgpu_ras_intr_triggered();

	if (adev->vcn.harvest_config & (1 << i))
		return 0;

	cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);

	/* err_event_athub will corrupt VCPU buffer, so we need to
	 * restore fw data and clear buffer in amdgpu_vcn_resume()
	 */
	if (in_ras_intr)
		return 0;

	return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
}

int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
{
	unsigned int size;
	void *ptr;
	int idx;

	if (adev->vcn.harvest_config & (1 << i))
		return 0;
	if (adev->vcn.inst[i].vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
	ptr = adev->vcn.inst[i].cpu_addr;

	if (adev->vcn.inst[i].saved_bo != NULL) {
		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			drm_dev_exit(idx);
		}
		kvfree(adev->vcn.inst[i].saved_bo);
		adev->vcn.inst[i].saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned int offset;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(adev->vcn.inst[i].cpu_addr,
					    adev->vcn.inst[i].fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				drm_dev_exit(idx);
			}
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_vcn_inst *vcn_inst =
		container_of(work, struct amdgpu_vcn_inst, idle_work.work);
	struct amdgpu_device *adev = vcn_inst->adev;
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i = vcn_inst->inst, j;
	int r = 0;

	if (adev->vcn.harvest_config & (1 << i))
		return;

	for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
		fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !adev->vcn.inst[i].using_unified_queue) {
		struct dpg_pause_state new_state;

		if (fence[i] ||
		    unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
	}

	fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
	fences += fence[i];

	if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
		vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    false);
		if (r)
			dev_warn(adev->dev,
				 "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
	}
}

void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
	int r = 0;

	atomic_inc(&vcn_inst->total_submission_cnt);

	if (!cancel_delayed_work_sync(&vcn_inst->idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
						    true);
		if (r)
			dev_warn(adev->dev,
				 "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&vcn_inst->vcn_pg_lock);
	vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    !vcn_inst->using_unified_queue) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < vcn_inst->num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);

			if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}
		vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
	}
	mutex_unlock(&vcn_inst->vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
	    !adev->vcn.inst[ring->me].using_unified_queue)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
			      VCN_IDLE_TIMEOUT);
}

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned int i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     64, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT, ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT, ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
						   uint32_t ib_pack_in_dw, bool enc)
{
	uint32_t *ib_checksum;

	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
	ib->ptr[ib->length_dw++] = 0x30000002;
	ib_checksum = &ib->ptr[ib->length_dw++];
	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
	ib->ptr[ib->length_dw++] = 0x30000001;
	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);

	return ib_checksum;
}

static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
						uint32_t ib_pack_in_dw)
{
	uint32_t i;
	uint32_t checksum = 0;

	for (i = 0; i < ib_pack_in_dw; i++)
		checksum += *(*ib_checksum + 2 + i);

	**ib_checksum = checksum;
}

static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	uint32_t *ib_checksum;
	uint32_t ib_pack_in_dw;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	/* single queue headers */
	if (adev->vcn.inst[ring->me].using_unified_queue) {
		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
				+ 4 + 2; /* engine info + decoding ib in dw */
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
	}

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	unsigned int ib_size_dw = 16;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint32_t *ib_checksum = NULL;
	uint64_t addr;
	int i, r;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_size_dw += 8;

	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);

	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (adev->vcn.inst[ring->me].using_unified_queue)
		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(&ib, fence);
	dma_fence_put(fence);

	return r;
}

int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	long r;

	if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
	    (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
		r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
		if (r)
			goto error;
	}

	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);

error:
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
{
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		if (adev->vcn.harvest_config & (1 << i))
			return;

		if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
		     amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1)) &&
		    (i > 0))
			return;

		hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
		/* currently only support 2 FW instances */
		if (i >= 2) {
			dev_info(adev->dev, "More than 2 VCN FW instances!\n");
			return;
		}
		idx = AMDGPU_UCODE_ID_VCN + i;
		adev->firmware.ucode[idx].ucode_id = idx;
		adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
	}
}

/*
 * debugfs for mapping vcn firmware log buffer.
 */
#if defined(CONFIG_DEBUG_FS)
static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_vcn_inst *vcn;
	void *log_buf;
	volatile struct amdgpu_vcn_fwlog *plog;
	unsigned int read_pos, write_pos, available, i, read_bytes = 0;
	unsigned int read_num[2] = {0};

	vcn = file_inode(f)->i_private;
	if (!vcn)
		return -ENODEV;

	if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
		return -EFAULT;

	log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;

	plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
	read_pos = plog->rptr;
	write_pos = plog->wptr;

	if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
		return -EFAULT;

	if (!size || (read_pos == write_pos))
		return 0;

	if (write_pos > read_pos) {
		available = write_pos - read_pos;
		read_num[0] = min_t(size_t, size, available);
	} else {
		read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
		available = read_num[0] + write_pos - plog->header_size;
		if (size > available)
			read_num[1] = write_pos - plog->header_size;
		else if (size > read_num[0])
			read_num[1] = size - read_num[0];
		else
			read_num[0] = size;
	}

	for (i = 0; i < 2; i++) {
		if (read_num[i]) {
			if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
				read_pos = plog->header_size;
			if (read_num[i] == copy_to_user((buf + read_bytes),
							(log_buf + read_pos), read_num[i]))
				return -EFAULT;

			read_bytes += read_num[i];
			read_pos += read_num[i];
		}
	}

	plog->rptr = read_pos;
	*pos += read_bytes;
	return read_bytes;
}

static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_vcn_fwlog_read,
	.llseek = default_llseek
};
#endif

void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
				   struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_vcn_%d_fwlog", i);
	debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
				 &amdgpu_debugfs_vcnfwlog_fops,
				 AMDGPU_VCNFW_LOG_SIZE);
#endif
}

void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
	volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
	void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
	uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
	volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
	volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
							      + vcn->fw_shared.log_offset;

	*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
	fw_log->is_enabled = 1;
	fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
	fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
	fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

	log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
	log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
	log_buf->rptr = log_buf->header_size;
	log_buf->wptr = log_buf->header_size;
	log_buf->wrapped = 0;
#endif
}

int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->vcn.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ih_data.head = *ras_if;
		amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, ras_if->block);
		else
			dev_warn(adev->dev,
				 "No ras_poison_handler interface in SRIOV for VCN!\n");
	}

	return 0;
}

int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r, i;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i) ||
			    !adev->vcn.inst[i].ras_poison_irq.funcs)
				continue;

			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
			if (r)
				goto late_fini;
		}
	}
	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_vcn_ras *ras;

	if (!adev->vcn.ras)
		return 0;

	ras = adev->vcn.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register vcn ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "vcn");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
	adev->vcn.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;

	return 0;
}

int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
			       enum AMDGPU_UCODE_ID ucode_id)
{
	struct amdgpu_firmware_info ucode = {
		.ucode_id = (ucode_id ? ucode_id :
			     (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
					 AMDGPU_UCODE_ID_VCN0_RAM)),
		.mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
		.ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
			       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
	};

	return psp_execute_ip_fw_load(&adev->psp, &ucode);
}

static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (!adev)
		return -ENODEV;

	return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
}

static DEVICE_ATTR(vcn_reset_mask, 0444,
		   amdgpu_get_vcn_reset_mask, NULL);

int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->vcn.num_vcn_inst) {
		r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
		if (r)
			return r;
	}

	return r;
}

void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
{
	if (adev->dev->kobj.sd) {
		if (adev->vcn.num_vcn_inst)
			device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
	}
}

/*
 * debugfs to enable/disable vcn job submission to specific core or
 * instance. It is created only if the queue type is unified.
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;

	mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
	if ((val & mask) == 0)
		return -EINVAL;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (val & (1ULL << i))
			ring->sched.ready = true;
		else
			ring->sched.ready = false;
	}
	/* publish sched.ready flag update effective immediately across smp */
	smp_rmb();
	return 0;
}

static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	u32 i;
	u64 mask = 0;
	struct amdgpu_ring *ring;

	if (!adev)
		return -ENODEV;
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ring = &adev->vcn.inst[i].ring_enc[0];
		if (ring->sched.ready)
			mask |= 1ULL << i;
	}
	*val = mask;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
			 amdgpu_debugfs_vcn_sched_mask_get,
			 amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
#endif

void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	char name[32];

	if (adev->vcn.num_vcn_inst <= 1 ||
	    !adev->vcn.inst[0].using_unified_queue)
		return;
	sprintf(name, "amdgpu_vcn_sched_mask");
	debugfs_create_file(name, 0600, root, adev,
			    &amdgpu_debugfs_vcn_sched_mask_fops);
#endif
}

/**
 * vcn_set_powergating_state - set VCN block powergating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
			      enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret = 0, i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];

		ret |= vinst->set_pg_state(vinst, state);
	}

	return ret;
}