Skip to content
Navigation Menu
Toggle navigation
Sign in
In this repository
All GitHub Enterprise
↵
Jump to
↵
No suggested jump to results
In this repository
All GitHub Enterprise
↵
Jump to
↵
In this organization
All GitHub Enterprise
↵
Jump to
↵
In this repository
All GitHub Enterprise
↵
Jump to
↵
Sign in
Reseting focus
You signed in with another tab or window.
Reload
to refresh your session.
You signed out in another tab or window.
Reload
to refresh your session.
You switched accounts on another tab or window.
Reload
to refresh your session.
Dismiss alert
{{ message }}
mariux64
/
linux
Public
Notifications
You must be signed in to change notification settings
Fork
0
Star
0
Code
Issues
2
Pull requests
0
Actions
Projects
0
Wiki
Security
Insights
Additional navigation options
Code
Issues
Pull requests
Actions
Projects
Wiki
Security
Insights
Files
acdc43f
Documentation
LICENSES
arch
block
certs
crypto
drivers
accel
accessibility
acpi
amba
android
ata
atm
auxdisplay
base
bcma
block
bluetooth
bus
cache
cdrom
cdx
char
clk
clocksource
comedi
connector
counter
cpufreq
cpuidle
crypto
cxl
dax
dca
devfreq
dio
dma-buf
dma
dpll
edac
eisa
extcon
firewire
firmware
fpga
fsi
gnss
gpio
gpu
drm
adp
amd
acp
amdgpu
Kconfig
Makefile
ObjectID.h
aldebaran.c
aldebaran.h
aldebaran_reg_init.c
amdgpu.h
amdgpu_aca.c
amdgpu_aca.h
amdgpu_acp.c
amdgpu_acp.h
amdgpu_acpi.c
amdgpu_afmt.c
amdgpu_amdkfd.c
amdgpu_amdkfd.h
amdgpu_amdkfd_aldebaran.c
amdgpu_amdkfd_aldebaran.h
amdgpu_amdkfd_arcturus.c
amdgpu_amdkfd_arcturus.h
amdgpu_amdkfd_fence.c
amdgpu_amdkfd_gc_9_4_3.c
amdgpu_amdkfd_gfx_v10.c
amdgpu_amdkfd_gfx_v10.h
amdgpu_amdkfd_gfx_v10_3.c
amdgpu_amdkfd_gfx_v11.c
amdgpu_amdkfd_gfx_v12.c
amdgpu_amdkfd_gfx_v7.c
amdgpu_amdkfd_gfx_v8.c
amdgpu_amdkfd_gfx_v9.c
amdgpu_amdkfd_gfx_v9.h
amdgpu_amdkfd_gpuvm.c
amdgpu_atombios.c
amdgpu_atombios.h
amdgpu_atomfirmware.c
amdgpu_atomfirmware.h
amdgpu_atpx_handler.c
amdgpu_benchmark.c
amdgpu_bios.c
amdgpu_bo_list.c
amdgpu_bo_list.h
amdgpu_cgs.c
amdgpu_connectors.c
amdgpu_connectors.h
amdgpu_cper.c
amdgpu_cper.h
amdgpu_cs.c
amdgpu_cs.h
amdgpu_csa.c
amdgpu_csa.h
amdgpu_ctx.c
amdgpu_ctx.h
amdgpu_debugfs.c
amdgpu_debugfs.h
amdgpu_dev_coredump.c
amdgpu_dev_coredump.h
amdgpu_device.c
amdgpu_df.h
amdgpu_discovery.c
amdgpu_discovery.h
amdgpu_display.c
amdgpu_display.h
amdgpu_dma_buf.c
amdgpu_dma_buf.h
amdgpu_doorbell.h
amdgpu_doorbell_mgr.c
amdgpu_drv.c
amdgpu_drv.h
amdgpu_eeprom.c
amdgpu_eeprom.h
amdgpu_encoders.c
amdgpu_eviction_fence.c
amdgpu_eviction_fence.h
amdgpu_fdinfo.c
amdgpu_fdinfo.h
amdgpu_fence.c
amdgpu_fru_eeprom.c
amdgpu_fru_eeprom.h
amdgpu_fw_attestation.c
amdgpu_fw_attestation.h
amdgpu_gart.c
amdgpu_gart.h
amdgpu_gds.h
amdgpu_gem.c
amdgpu_gem.h
amdgpu_gfx.c
amdgpu_gfx.h
amdgpu_gfxhub.h
amdgpu_gmc.c
amdgpu_gmc.h
amdgpu_gtt_mgr.c
amdgpu_hdp.c
amdgpu_hdp.h
amdgpu_hmm.c
amdgpu_hmm.h
amdgpu_i2c.c
amdgpu_i2c.h
amdgpu_ib.c
amdgpu_ids.c
amdgpu_ids.h
amdgpu_ih.c
amdgpu_ih.h
amdgpu_imu.h
amdgpu_ioc32.c
amdgpu_irq.c
amdgpu_irq.h
amdgpu_isp.c
amdgpu_isp.h
amdgpu_job.c
amdgpu_job.h
amdgpu_jpeg.c
amdgpu_jpeg.h
amdgpu_kms.c
amdgpu_lsdma.c
amdgpu_lsdma.h
amdgpu_mca.c
amdgpu_mca.h
amdgpu_mes.c
amdgpu_mes.h
amdgpu_mes_ctx.h
amdgpu_mmhub.c
amdgpu_mmhub.h
amdgpu_mode.h
amdgpu_nbio.c
amdgpu_nbio.h
amdgpu_object.c
amdgpu_object.h
amdgpu_pll.c
amdgpu_pll.h
amdgpu_pmu.c
amdgpu_pmu.h
amdgpu_preempt_mgr.c
amdgpu_psp.c
amdgpu_psp.h
amdgpu_psp_ta.c
amdgpu_psp_ta.h
amdgpu_rap.c
amdgpu_rap.h
amdgpu_ras.c
amdgpu_ras.h
amdgpu_ras_eeprom.c
amdgpu_ras_eeprom.h
amdgpu_res_cursor.h
amdgpu_reset.c
amdgpu_reset.h
amdgpu_ring.c
amdgpu_ring.h
amdgpu_ring_mux.c
amdgpu_ring_mux.h
amdgpu_rlc.c
amdgpu_rlc.h
amdgpu_sa.c
amdgpu_sched.c
amdgpu_sched.h
amdgpu_sdma.c
amdgpu_sdma.h
amdgpu_securedisplay.c
amdgpu_securedisplay.h
amdgpu_seq64.c
amdgpu_seq64.h
amdgpu_smuio.h
amdgpu_socbb.h
amdgpu_sync.c
amdgpu_sync.h
amdgpu_trace.h
amdgpu_trace_points.c
amdgpu_ttm.c
amdgpu_ttm.h
amdgpu_ucode.c
amdgpu_ucode.h
amdgpu_umc.c
amdgpu_umc.h
amdgpu_umr.h
amdgpu_umsch_mm.c
amdgpu_umsch_mm.h
amdgpu_userq_fence.c
amdgpu_userq_fence.h
amdgpu_userqueue.c
amdgpu_userqueue.h
amdgpu_uvd.c
amdgpu_uvd.h
amdgpu_vce.c
amdgpu_vce.h
amdgpu_vcn.c
amdgpu_vcn.h
amdgpu_vf_error.c
amdgpu_vf_error.h
amdgpu_virt.c
amdgpu_virt.h
amdgpu_vkms.c
amdgpu_vkms.h
amdgpu_vm.c
amdgpu_vm.h
amdgpu_vm_cpu.c
amdgpu_vm_pt.c
amdgpu_vm_sdma.c
amdgpu_vm_tlb_fence.c
amdgpu_vpe.c
amdgpu_vpe.h
amdgpu_vram_mgr.c
amdgpu_vram_mgr.h
amdgpu_xcp.c
amdgpu_xcp.h
amdgpu_xgmi.c
amdgpu_xgmi.h
amdgv_sriovmsg.h
aqua_vanjaram.c
arct_reg_init.c
athub_v1_0.c
athub_v1_0.h
athub_v2_0.c
athub_v2_0.h
athub_v2_1.c
athub_v2_1.h
athub_v3_0.c
athub_v3_0.h
athub_v4_1_0.c
athub_v4_1_0.h
atom.c
atom.h
atombios_crtc.c
atombios_crtc.h
atombios_dp.c
atombios_dp.h
atombios_encoders.c
atombios_encoders.h
atombios_i2c.c
atombios_i2c.h
cik.c
cik.h
cik_ih.c
cik_ih.h
cik_sdma.c
cik_sdma.h
cikd.h
clearstate_ci.h
clearstate_defs.h
clearstate_gfx10.h
clearstate_gfx11.h
clearstate_gfx12.h
clearstate_gfx9.h
clearstate_si.h
clearstate_vi.h
cz_ih.c
cz_ih.h
dce_v10_0.c
dce_v10_0.h
dce_v11_0.c
dce_v11_0.h
dce_v6_0.c
dce_v6_0.h
dce_v8_0.c
dce_v8_0.h
df_v1_7.c
df_v1_7.h
df_v3_6.c
df_v3_6.h
df_v4_15.c
df_v4_15.h
df_v4_3.c
df_v4_3.h
df_v4_6_2.c
df_v4_6_2.h
dimgrey_cavefish_reg_init.c
emu_soc.c
gfx_v10_0.c
gfx_v10_0.h
gfx_v10_0_cleaner_shader.h
gfx_v10_1_10_cleaner_shader.asm
gfx_v10_3_0_cleaner_shader.asm
gfx_v11_0.c
gfx_v11_0.h
gfx_v11_0_3.c
gfx_v11_0_3.h
gfx_v11_0_3_cleaner_shader.asm
gfx_v11_0_cleaner_shader.h
gfx_v12_0.c
gfx_v12_0.h
gfx_v6_0.c
gfx_v6_0.h
gfx_v7_0.c
gfx_v7_0.h
gfx_v8_0.c
gfx_v8_0.h
gfx_v9_0.c
gfx_v9_0.h
gfx_v9_0_cleaner_shader.h
gfx_v9_4.c
gfx_v9_4.h
gfx_v9_4_2.c
gfx_v9_4_2.h
gfx_v9_4_2_cleaner_shader.asm
gfx_v9_4_3.c
gfx_v9_4_3.h
gfx_v9_4_3_cleaner_shader.asm
gfx_v9_4_3_cleaner_shader.h
gfxhub_v11_5_0.c
gfxhub_v11_5_0.h
gfxhub_v12_0.c
gfxhub_v12_0.h
gfxhub_v1_0.c
gfxhub_v1_0.h
gfxhub_v1_1.c
gfxhub_v1_1.h
gfxhub_v1_2.c
gfxhub_v1_2.h
gfxhub_v2_0.c
gfxhub_v2_0.h
gfxhub_v2_1.c
gfxhub_v2_1.h
gfxhub_v3_0.c
gfxhub_v3_0.h
gfxhub_v3_0_3.c
gfxhub_v3_0_3.h
gmc_v10_0.c
gmc_v10_0.h
gmc_v11_0.c
gmc_v11_0.h
gmc_v12_0.c
gmc_v12_0.h
gmc_v6_0.c
gmc_v6_0.h
gmc_v7_0.c
gmc_v7_0.h
gmc_v8_0.c
gmc_v8_0.h
gmc_v9_0.c
gmc_v9_0.h
hdp_v4_0.c
hdp_v4_0.h
hdp_v5_0.c
hdp_v5_0.h
hdp_v5_2.c
hdp_v5_2.h
hdp_v6_0.c
hdp_v6_0.h
hdp_v7_0.c
hdp_v7_0.h
iceland_ih.c
iceland_ih.h
iceland_sdma_pkt_open.h
ih_v6_0.c
ih_v6_0.h
ih_v6_1.c
ih_v6_1.h
ih_v7_0.c
ih_v7_0.h
imu_v11_0.c
imu_v11_0.h
imu_v11_0_3.c
imu_v11_0_3.h
imu_v12_0.c
imu_v12_0.h
isp_v4_1_0.c
isp_v4_1_0.h
isp_v4_1_1.c
isp_v4_1_1.h
jpeg_v1_0.c
jpeg_v1_0.h
jpeg_v2_0.c
jpeg_v2_0.h
jpeg_v2_5.c
jpeg_v2_5.h
jpeg_v3_0.c
jpeg_v3_0.h
jpeg_v4_0.c
jpeg_v4_0.h
jpeg_v4_0_3.c
jpeg_v4_0_3.h
jpeg_v4_0_5.c
jpeg_v4_0_5.h
jpeg_v5_0_0.c
jpeg_v5_0_0.h
jpeg_v5_0_1.c
jpeg_v5_0_1.h
lsdma_v6_0.c
lsdma_v6_0.h
lsdma_v7_0.c
lsdma_v7_0.h
mca_v3_0.c
mca_v3_0.h
mes_userqueue.c
mes_userqueue.h
mes_v11_0.c
mes_v11_0.h
mes_v12_0.c
mes_v12_0.h
mmhub_v1_0.c
mmhub_v1_0.h
mmhub_v1_7.c
mmhub_v1_7.h
mmhub_v1_8.c
mmhub_v1_8.h
mmhub_v2_0.c
mmhub_v2_0.h
mmhub_v2_3.c
mmhub_v2_3.h
mmhub_v3_0.c
mmhub_v3_0.h
mmhub_v3_0_1.c
mmhub_v3_0_1.h
mmhub_v3_0_2.c
mmhub_v3_0_2.h
mmhub_v3_3.c
mmhub_v3_3.h
mmhub_v4_1_0.c
mmhub_v4_1_0.h
mmhub_v9_4.c
mmhub_v9_4.h
mmsch_v1_0.h
mmsch_v2_0.h
mmsch_v3_0.h
mmsch_v4_0.h
mmsch_v4_0_3.h
mxgpu_ai.c
mxgpu_ai.h
mxgpu_nv.c
mxgpu_nv.h
mxgpu_vi.c
mxgpu_vi.h
navi10_ih.c
navi10_ih.h
navi10_sdma_pkt_open.h
nbif_v6_3_1.c
nbif_v6_3_1.h
nbio_v2_3.c
nbio_v2_3.h
nbio_v4_3.c
nbio_v4_3.h
nbio_v6_1.c
nbio_v6_1.h
nbio_v7_0.c
nbio_v7_0.h
nbio_v7_11.c
nbio_v7_11.h
nbio_v7_2.c
nbio_v7_2.h
nbio_v7_4.c
nbio_v7_4.h
nbio_v7_7.c
nbio_v7_7.h
nbio_v7_9.c
nbio_v7_9.h
nv.c
nv.h
nvd.h
psp_gfx_if.h
psp_v10_0.c
psp_v10_0.h
psp_v11_0.c
psp_v11_0.h
psp_v11_0_8.c
psp_v11_0_8.h
psp_v12_0.c
psp_v12_0.h
psp_v13_0.c
psp_v13_0.h
psp_v13_0_4.c
psp_v13_0_4.h
psp_v14_0.c
psp_v14_0.h
psp_v3_1.c
psp_v3_1.h
sdma_common.h
sdma_v2_4.c
sdma_v2_4.h
sdma_v3_0.c
sdma_v3_0.h
sdma_v4_0.c
sdma_v4_0.h
sdma_v4_4.c
sdma_v4_4.h
sdma_v4_4_2.c
sdma_v4_4_2.h
sdma_v5_0.c
sdma_v5_0.h
sdma_v5_2.c
sdma_v5_2.h
sdma_v6_0.c
sdma_v6_0.h
sdma_v6_0_0_pkt_open.h
sdma_v7_0.c
sdma_v7_0.h
si.c
si.h
si_dma.c
si_dma.h
si_enums.h
si_ih.c
si_ih.h
sid.h
sienna_cichlid.c
sienna_cichlid.h
smu_v11_0_i2c.c
smu_v11_0_i2c.h
smu_v13_0_10.c
smu_v13_0_10.h
smuio_v11_0.c
smuio_v11_0.h
smuio_v11_0_6.c
smuio_v11_0_6.h
smuio_v13_0.c
smuio_v13_0.h
smuio_v13_0_3.c
smuio_v13_0_3.h
smuio_v13_0_6.c
smuio_v13_0_6.h
smuio_v14_0_2.c
smuio_v14_0_2.h
smuio_v9_0.c
smuio_v9_0.h
soc15.c
soc15.h
soc15_common.h
soc15d.h
soc21.c
soc21.h
soc24.c
soc24.h
ta_rap_if.h
ta_ras_if.h
ta_secureDisplay_if.h
ta_xgmi_if.h
tonga_ih.c
tonga_ih.h
tonga_sdma_pkt_open.h
umc_v12_0.c
umc_v12_0.h
umc_v6_0.c
umc_v6_0.h
umc_v6_1.c
umc_v6_1.h
umc_v6_7.c
umc_v6_7.h
umc_v8_10.c
umc_v8_10.h
umc_v8_14.c
umc_v8_14.h
umc_v8_7.c
umc_v8_7.h
umsch_mm_v4_0.c
umsch_mm_v4_0.h
uvd_v3_1.c
uvd_v3_1.h
uvd_v4_2.c
uvd_v4_2.h
uvd_v5_0.c
uvd_v5_0.h
uvd_v6_0.c
uvd_v6_0.h
uvd_v7_0.c
uvd_v7_0.h
vce_v2_0.c
vce_v2_0.h
vce_v3_0.c
vce_v3_0.h
vce_v4_0.c
vce_v4_0.h
vcn_sw_ring.c
vcn_sw_ring.h
vcn_v1_0.c
vcn_v1_0.h
vcn_v2_0.c
vcn_v2_0.h
vcn_v2_5.c
vcn_v2_5.h
vcn_v3_0.c
vcn_v3_0.h
vcn_v4_0.c
vcn_v4_0.h
vcn_v4_0_3.c
vcn_v4_0_3.h
vcn_v4_0_5.c
vcn_v4_0_5.h
vcn_v5_0_0.c
vcn_v5_0_0.h
vcn_v5_0_1.c
vcn_v5_0_1.h
vega10_ih.c
vega10_ih.h
vega10_reg_init.c
vega10_sdma_pkt_open.h
vega20_ih.c
vega20_ih.h
vega20_reg_init.c
vi.c
vi.h
vid.h
vpe_6_1_fw_if.h
vpe_v6_1.c
vpe_v6_1.h
amdkfd
amdxcp
display
include
pm
arm
armada
aspeed
ast
atmel-hlcdc
bridge
ci
clients
display
etnaviv
exynos
fsl-dcu
gma500
gud
hisilicon
hyperv
i915
imagination
imx
ingenic
kmb
lib
lima
logicvc
loongson
mcde
mediatek
meson
mgag200
msm
mxsfb
nouveau
omapdrm
panel
panfrost
panthor
pl111
qxl
radeon
renesas
rockchip
scheduler
solomon
sprd
sti
stm
sun4i
tegra
tests
tidss
tilcdc
tiny
ttm
tve200
udl
v3d
vboxvideo
vc4
vgem
virtio
vkms
vmwgfx
xe
xen
xlnx
Kconfig
Makefile
drm_atomic.c
drm_atomic_helper.c
drm_atomic_state_helper.c
drm_atomic_uapi.c
drm_auth.c
drm_blend.c
drm_bridge.c
drm_buddy.c
drm_cache.c
drm_client.c
drm_client_event.c
drm_client_modeset.c
drm_color_mgmt.c
drm_connector.c
drm_crtc.c
drm_crtc_helper.c
drm_crtc_helper_internal.h
drm_crtc_internal.h
drm_damage_helper.c
drm_debugfs.c
drm_debugfs_crc.c
drm_displayid.c
drm_displayid_internal.h
drm_draw.c
drm_draw_internal.h
drm_drv.c
drm_dumb_buffers.c
drm_edid.c
drm_edid_load.c
drm_eld.c
drm_encoder.c
drm_exec.c
drm_fb_dma_helper.c
drm_fb_helper.c
drm_fbdev_dma.c
drm_fbdev_shmem.c
drm_fbdev_ttm.c
drm_file.c
drm_flip_work.c
drm_format_helper.c
drm_fourcc.c
drm_framebuffer.c
drm_gem.c
drm_gem_atomic_helper.c
drm_gem_dma_helper.c
drm_gem_framebuffer_helper.c
drm_gem_shmem_helper.c
drm_gem_ttm_helper.c
drm_gem_vram_helper.c
drm_gpusvm.c
drm_gpuvm.c
drm_internal.h
drm_ioc32.c
drm_ioctl.c
drm_kms_helper_common.c
drm_lease.c
drm_managed.c
drm_mipi_dbi.c
drm_mipi_dsi.c
drm_mm.c
drm_mode_config.c
drm_mode_object.c
drm_modes.c
drm_modeset_helper.c
drm_modeset_lock.c
drm_of.c
drm_panel.c
drm_panel_backlight_quirks.c
drm_panel_orientation_quirks.c
drm_panic.c
drm_panic_qr.rs
drm_pci.c
drm_plane.c
drm_plane_helper.c
drm_prime.c
drm_print.c
drm_privacy_screen.c
drm_privacy_screen_x86.c
drm_probe_helper.c
drm_property.c
drm_rect.c
drm_self_refresh_helper.c
drm_simple_kms_helper.c
drm_suballoc.c
drm_syncobj.c
drm_sysfs.c
drm_trace.h
drm_trace_points.c
drm_vblank.c
drm_vblank_work.c
drm_vma_manager.c
drm_writeback.c
host1x
ipu-v3
nova-core
trace
vga
Makefile
greybus
hid
hsi
hte
hv
hwmon
hwspinlock
hwtracing
i2c
i3c
idle
iio
infiniband
input
interconnect
iommu
ipack
irqchip
isdn
leds
macintosh
mailbox
mcb
md
media
memory
memstick
message
mfd
misc
mmc
most
mtd
mux
net
nfc
ntb
nubus
nvdimm
nvme
nvmem
of
opp
parisc
parport
pci
pcmcia
peci
perf
phy
pinctrl
platform
pmdomain
pnp
power
powercap
pps
ps3
ptp
pwm
rapidio
ras
regulator
remoteproc
reset
rpmsg
rtc
s390
sbus
scsi
sh
siox
slimbus
soc
soundwire
spi
spmi
ssb
staging
target
tc
tee
thermal
thunderbolt
tty
ufs
uio
usb
vdpa
vfio
vhost
video
virt
virtio
w1
watchdog
xen
zorro
Kconfig
Makefile
fs
include
init
io_uring
ipc
kernel
lib
mm
net
rust
samples
scripts
security
sound
tools
usr
virt
.clang-format
.clippy.toml
.cocciconfig
.editorconfig
.get_maintainer.ignore
.gitattributes
.gitignore
.mailmap
.rustfmt.toml
COPYING
CREDITS
Kbuild
Kconfig
MAINTAINERS
Makefile
README
Breadcrumbs
linux
/
drivers
/
gpu
/
drm
/
amd
/
amdgpu
/
amdgpu_mes.c
Blame
Blame
Latest commit
History
History
809 lines (666 loc) · 22 KB
Breadcrumbs
linux
/
drivers
/
gpu
/
drm
/
amd
/
amdgpu
/
amdgpu_mes.c
Top
File metadata and controls
Code
Blame
809 lines (666 loc) · 22 KB
Raw
/* * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #include <linux/firmware.h> #include <drm/drm_exec.h> #include "amdgpu_mes.h" #include "amdgpu.h" #include "soc15_common.h" #include "amdgpu_mes_ctx.h" #define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 #define AMDGPU_ONE_DOORBELL_SIZE 8 int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev) { return roundup(AMDGPU_ONE_DOORBELL_SIZE * AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS, PAGE_SIZE); } static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) { int i; struct amdgpu_mes *mes = &adev->mes; /* Bitmap for dynamic allocation of kernel doorbells */ mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL); if (!mes->doorbell_bitmap) { DRM_ERROR("Failed to allocate MES doorbell bitmap\n"); return -ENOMEM; } mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE; for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) { adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2; set_bit(i, mes->doorbell_bitmap); } return 0; } static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) { int r; if (!amdgpu_mes_log_enable) return 0; r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, &adev->mes.event_log_cpu_addr); if (r) { dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r); return r; } memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size); return 0; } static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev) { bitmap_free(adev->mes.doorbell_bitmap); } int amdgpu_mes_init(struct amdgpu_device *adev) { int i, r, num_pipes; adev->mes.adev = adev; idr_init(&adev->mes.pasid_idr); idr_init(&adev->mes.gang_id_idr); idr_init(&adev->mes.queue_id_idr); ida_init(&adev->mes.doorbell_ida); spin_lock_init(&adev->mes.queue_id_lock); mutex_init(&adev->mes.mutex_hidden); for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) spin_lock_init(&adev->mes.ring_lock[i]); adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK; adev->mes.vmid_mask_mmhub = 0xffffff00; adev->mes.vmid_mask_gfxhub = 0xffffff00; num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me; if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES) dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n", num_pipes, AMDGPU_MES_MAX_GFX_PIPES); for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) { if (i >= num_pipes) break; if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) /* * GFX V12 has only one GFX pipe, but 8 queues in it. * GFX pipe 0 queue 0 is being used by Kernel queue. * Set GFX pipe 0 queue 1-7 for MES scheduling * mask = 1111 1110b */ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE; else /* * GFX pipe 0 queue 0 is being used by Kernel queue. * Set GFX pipe 0 queue 1 for MES scheduling * mask = 10b */ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2; } num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec; if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES) dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n", num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES); for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) { if (i >= num_pipes) break; adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC; } num_pipes = adev->sdma.num_instances; if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES) dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n", num_pipes, AMDGPU_MES_MAX_SDMA_PIPES); for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) { if (i >= num_pipes) break; adev->mes.sdma_hqd_mask[i] = 0xfc; } for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) { r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]); if (r) { dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r); goto error; } adev->mes.sch_ctx_gpu_addr[i] = adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4); adev->mes.sch_ctx_ptr[i] = (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]]; r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs[i]); if (r) { dev_err(adev->dev, "(%d) query_status_fence_offs wb alloc failed\n", r); goto error; } adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr + (adev->mes.query_status_fence_offs[i] * 4); adev->mes.query_status_fence_ptr[i] = (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]]; } r = amdgpu_mes_doorbell_init(adev); if (r) goto error; r = amdgpu_mes_event_log_init(adev); if (r) goto error_doorbell; return 0; error_doorbell: amdgpu_mes_doorbell_free(adev); error: for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) { if (adev->mes.sch_ctx_ptr[i]) amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]); if (adev->mes.query_status_fence_ptr[i]) amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } idr_destroy(&adev->mes.pasid_idr); idr_destroy(&adev->mes.gang_id_idr); idr_destroy(&adev->mes.queue_id_idr); ida_destroy(&adev->mes.doorbell_ida); mutex_destroy(&adev->mes.mutex_hidden); return r; } void amdgpu_mes_fini(struct amdgpu_device *adev) { int i; amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, &adev->mes.event_log_cpu_addr); for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) { if (adev->mes.sch_ctx_ptr[i]) amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]); if (adev->mes.query_status_fence_ptr[i]) amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs[i]); } amdgpu_mes_doorbell_free(adev); idr_destroy(&adev->mes.pasid_idr); idr_destroy(&adev->mes.gang_id_idr); idr_destroy(&adev->mes.queue_id_idr); ida_destroy(&adev->mes.doorbell_ida); mutex_destroy(&adev->mes.mutex_hidden); } int amdgpu_mes_suspend(struct amdgpu_device *adev) { struct mes_suspend_gang_input input; int r; if (!amdgpu_mes_suspend_resume_all_supported(adev)) return 0; memset(&input, 0x0, sizeof(struct mes_suspend_gang_input)); input.suspend_all_gangs = 1; /* * Avoid taking any other locks under MES lock to avoid circular * lock dependencies. */ amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->suspend_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to suspend all gangs"); return r; } int amdgpu_mes_resume(struct amdgpu_device *adev) { struct mes_resume_gang_input input; int r; if (!amdgpu_mes_suspend_resume_all_supported(adev)) return 0; memset(&input, 0x0, sizeof(struct mes_resume_gang_input)); input.resume_all_gangs = 1; /* * Avoid taking any other locks under MES lock to avoid circular * lock dependencies. */ amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->resume_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to resume all gangs"); return r; } int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id) { unsigned long flags; struct amdgpu_mes_queue *queue; struct amdgpu_mes_gang *gang; struct mes_reset_queue_input queue_input; int r; /* * Avoid taking any other locks under MES lock to avoid circular * lock dependencies. */ amdgpu_mes_lock(&adev->mes); /* remove the mes gang from idr list */ spin_lock_irqsave(&adev->mes.queue_id_lock, flags); queue = idr_find(&adev->mes.queue_id_idr, queue_id); if (!queue) { spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); amdgpu_mes_unlock(&adev->mes); DRM_ERROR("queue id %d doesn't exist\n", queue_id); return -EINVAL; } spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags); DRM_DEBUG("try to reset queue, doorbell off = 0x%llx\n", queue->doorbell_off); gang = queue->gang; queue_input.doorbell_offset = queue->doorbell_off; queue_input.gang_context_addr = gang->gang_ctx_gpu_addr; r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); if (r) DRM_ERROR("failed to reset hardware queue, queue id = %d\n", queue_id); amdgpu_mes_unlock(&adev->mes); return 0; } int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type, int me_id, int pipe_id, int queue_id, int vmid) { struct mes_reset_queue_input queue_input; int r; queue_input.queue_type = queue_type; queue_input.use_mmio = true; queue_input.me_id = me_id; queue_input.pipe_id = pipe_id; queue_input.queue_id = queue_id; queue_input.vmid = vmid; r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); if (r) DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n", queue_id); return r; } int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring) { struct mes_map_legacy_queue_input queue_input; int r; memset(&queue_input, 0, sizeof(queue_input)); queue_input.queue_type = ring->funcs->type; queue_input.doorbell_offset = ring->doorbell_index; queue_input.pipe_id = ring->pipe; queue_input.queue_id = ring->queue; queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); queue_input.wptr_addr = ring->wptr_gpu_addr; r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); if (r) DRM_ERROR("failed to map legacy queue\n"); return r; } int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring, enum amdgpu_unmap_queues_action action, u64 gpu_addr, u64 seq) { struct mes_unmap_legacy_queue_input queue_input; int r; queue_input.action = action; queue_input.queue_type = ring->funcs->type; queue_input.doorbell_offset = ring->doorbell_index; queue_input.pipe_id = ring->pipe; queue_input.queue_id = ring->queue; queue_input.trail_fence_addr = gpu_addr; queue_input.trail_fence_data = seq; r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); if (r) DRM_ERROR("failed to unmap legacy queue\n"); return r; } int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid, bool use_mmio) { struct mes_reset_legacy_queue_input queue_input; int r; memset(&queue_input, 0, sizeof(queue_input)); queue_input.queue_type = ring->funcs->type; queue_input.doorbell_offset = ring->doorbell_index; queue_input.me_id = ring->me; queue_input.pipe_id = ring->pipe; queue_input.queue_id = ring->queue; queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0; queue_input.wptr_addr = ring->wptr_gpu_addr; queue_input.vmid = vmid; queue_input.use_mmio = use_mmio; r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input); if (r) DRM_ERROR("failed to reset legacy queue\n"); return r; } uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) { struct mes_misc_op_input op_input; int r, val = 0; uint32_t addr_offset = 0; uint64_t read_val_gpu_addr; uint32_t *read_val_ptr; if (amdgpu_device_wb_get(adev, &addr_offset)) { DRM_ERROR("critical bug! too many mes readers\n"); goto error; } read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4); read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset]; op_input.op = MES_MISC_OP_READ_REG; op_input.read_reg.reg_offset = reg; op_input.read_reg.buffer_addr = read_val_gpu_addr; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes rreg is not supported!\n"); goto error; } r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) DRM_ERROR("failed to read reg (0x%x)\n", reg); else val = *(read_val_ptr); error: if (addr_offset) amdgpu_device_wb_free(adev, addr_offset); return val; } int amdgpu_mes_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t val) { struct mes_misc_op_input op_input; int r; op_input.op = MES_MISC_OP_WRITE_REG; op_input.write_reg.reg_offset = reg; op_input.write_reg.reg_value = val; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes wreg is not supported!\n"); r = -EINVAL; goto error; } r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) DRM_ERROR("failed to write reg (0x%x)\n", reg); error: return r; } int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) { struct mes_misc_op_input op_input; int r; op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT; op_input.wrm_reg.reg0 = reg0; op_input.wrm_reg.reg1 = reg1; op_input.wrm_reg.ref = ref; op_input.wrm_reg.mask = mask; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes reg_write_reg_wait is not supported!\n"); r = -EINVAL; goto error; } r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) DRM_ERROR("failed to reg_write_reg_wait\n"); error: return r; } int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg, uint32_t val, uint32_t mask) { struct mes_misc_op_input op_input; int r; op_input.op = MES_MISC_OP_WRM_REG_WAIT; op_input.wrm_reg.reg0 = reg; op_input.wrm_reg.ref = val; op_input.wrm_reg.mask = mask; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes reg wait is not supported!\n"); r = -EINVAL; goto error; } r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) DRM_ERROR("failed to reg_write_reg_wait\n"); error: return r; } int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, const uint32_t *tcp_watch_cntl, uint32_t flags, bool trap_en) { struct mes_misc_op_input op_input = {0}; int r; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes set shader debugger is not supported!\n"); return -EINVAL; } op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER; op_input.set_shader_debugger.process_context_addr = process_context_addr; op_input.set_shader_debugger.flags.u32all = flags; /* use amdgpu mes_flush_shader_debugger instead */ if (op_input.set_shader_debugger.flags.process_ctx_flush) return -EINVAL; op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl; memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl, sizeof(op_input.set_shader_debugger.tcp_watch_cntl)); if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >> AMDGPU_MES_API_VERSION_SHIFT) >= 14) op_input.set_shader_debugger.trap_en = trap_en; amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) DRM_ERROR("failed to set_shader_debugger\n"); amdgpu_mes_unlock(&adev->mes); return r; } int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr) { struct mes_misc_op_input op_input = {0}; int r; if (!adev->mes.funcs->misc_op) { DRM_ERROR("mes flush shader debugger is not supported!\n"); return -EINVAL; } op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER; op_input.set_shader_debugger.process_context_addr = process_context_addr; op_input.set_shader_debugger.flags.process_ctx_flush = true; amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) DRM_ERROR("failed to set_shader_debugger\n"); amdgpu_mes_unlock(&adev->mes); return r; } #define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng) \ do { \ if (id_offs < AMDGPU_MES_CTX_MAX_OFFS) \ return offsetof(struct amdgpu_mes_ctx_meta_data, \ _eng[ring->idx].slots[id_offs]); \ else if (id_offs == AMDGPU_MES_CTX_RING_OFFS) \ return offsetof(struct amdgpu_mes_ctx_meta_data, \ _eng[ring->idx].ring); \ else if (id_offs == AMDGPU_MES_CTX_IB_OFFS) \ return offsetof(struct amdgpu_mes_ctx_meta_data, \ _eng[ring->idx].ib); \ else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS) \ return offsetof(struct amdgpu_mes_ctx_meta_data, \ _eng[ring->idx].padding); \ } while(0) int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs) { switch (ring->funcs->type) { case AMDGPU_RING_TYPE_GFX: DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx); break; case AMDGPU_RING_TYPE_COMPUTE: DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute); break; case AMDGPU_RING_TYPE_SDMA: DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma); break; default: break; } WARN_ON(1); return -EINVAL; } uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev, enum amdgpu_mes_priority_level prio) { return adev->mes.aggregated_doorbells[prio]; } int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) { const struct mes_firmware_header_v1_0 *mes_hdr; struct amdgpu_firmware_info *info; char ucode_prefix[30]; char fw_name[50]; bool need_retry = false; u32 *ucode_ptr; int r; amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); if (adev->enable_uni_mes) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_uni_mes.bin", ucode_prefix); } else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", ucode_prefix, pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1"); need_retry = true; } else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", ucode_prefix, pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1"); } r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED, "%s", fw_name); if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) { dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix); r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED, "amdgpu/%s_mes.bin", ucode_prefix); } if (r) goto out; mes_hdr = (const struct mes_firmware_header_v1_0 *) adev->mes.fw[pipe]->data; adev->mes.uc_start_addr[pipe] = le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) | ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32); adev->mes.data_start_addr[pipe] = le32_to_cpu(mes_hdr->mes_data_start_addr_lo) | ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32); ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data + sizeof(union amdgpu_firmware_header)); adev->mes.fw_version[pipe] = le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { int ucode, ucode_data; if (pipe == AMDGPU_MES_SCHED_PIPE) { ucode = AMDGPU_UCODE_ID_CP_MES; ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA; } else { ucode = AMDGPU_UCODE_ID_CP_MES1; ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA; } info = &adev->firmware.ucode[ucode]; info->ucode_id = ucode; info->fw = adev->mes.fw[pipe]; adev->firmware.fw_size += ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes), PAGE_SIZE); info = &adev->firmware.ucode[ucode_data]; info->ucode_id = ucode_data; info->fw = adev->mes.fw[pipe]; adev->firmware.fw_size += ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes), PAGE_SIZE); } return 0; out: amdgpu_ucode_release(&adev->mes.fw[pipe]); return r; } bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) { uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; bool is_supported = false; if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && mes_rev >= 0x63) is_supported = true; return is_supported; } /* Fix me -- node_id is used to identify the correct MES instances in the future */ static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable) { struct mes_misc_op_input op_input = {0}; int r; op_input.op = MES_MISC_OP_CHANGE_CONFIG; op_input.change_config.option.limit_single_process = enable ? 1 : 0; if (!adev->mes.funcs->misc_op) { dev_err(adev->dev, "mes change config is not supported!\n"); r = -EINVAL; goto error; } r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) dev_err(adev->dev, "failed to change_config.\n"); error: return r; } int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev) { int i, r = 0; if (adev->enable_mes && adev->gfx.enable_cleaner_shader) { mutex_lock(&adev->enforce_isolation_mutex); for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) { if (adev->enforce_isolation[i]) r |= amdgpu_mes_set_enforce_isolation(adev, i, true); else r |= amdgpu_mes_set_enforce_isolation(adev, i, false); } mutex_unlock(&adev->enforce_isolation_mutex); } return r; } #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = m->private; uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, mem, adev->mes.event_log_size, false); return 0; } DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log); #endif void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; if (adev->enable_mes && amdgpu_mes_log_enable) debugfs_create_file("amdgpu_mes_event_log", 0444, root, adev, &amdgpu_debugfs_mes_event_log_fops); #endif }
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
You can’t perform that action at this time.