Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next

Last set of features for 4.15.  Highlights:
- Add a bo flag to allow buffers to opt out of implicit sync
- Add ctx priority setting interface (see the userspace sketch after the changed-file summary below)
- Lots more powerplay cleanups
- Start to plumb through vram lost infrastructure for gpu reset
- ttm support for huge pages
- misc cleanups and bug fixes

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (73 commits)
  drm/amd/powerplay: Place the constant on the right side of the test
  drm/amd/powerplay: Remove useless variable
  drm/amd/powerplay: Don't cast kzalloc() return value
  drm/amdgpu: allow GTT overcommit during bind
  drm/amdgpu: linear validate first then bind to GART
  drm/amd/pp: Fix overflow when setup decf/pix/disp dpm table.
  drm/amd/pp: thermal control not enabled on vega10.
  drm/amdgpu: busywait KIQ register accessing (v4)
  drm/amdgpu: report more amdgpu_fence_info
  drm/amdgpu:don't check soft_reset for sriov
  drm/amdgpu:fix duplicated setting job's vram_lost
  drm/amdgpu:reduce wb to 512 slot
  drm/amdgpu: fix regresstion on SR-IOV gpu reset failed
  drm/amd/powerplay: Tidy up cz_dpm_powerup_vce()
  drm/amd/powerplay: Tidy up cz_dpm_powerdown_vce()
  drm/amd/powerplay: Tidy up cz_dpm_update_vce_dpm()
  drm/amd/powerplay: Tidy up cz_dpm_update_uvd_dpm()
  drm/amd/powerplay: Tidy up cz_dpm_powerup_uvd()
  drm/amd/powerplay: Tidy up cz_dpm_powerdown_uvd()
  drm/amd/powerplay: Tidy up cz_start_dpm()
  ...
Dave Airlie committed Oct 20, 2017
2 parents 40d8670 + 96687ec commit 6585d42
Showing 65 changed files with 12,036 additions and 13,396 deletions.
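
Two of the highlights above are visible directly in the UAPI: the implicit-sync opt-out is a new GEM creation flag (AMDGPU_GEM_CREATE_EXPLICIT_SYNC) and the ctx priority interface adds a priority field to the context-allocation ioctl. Below is a minimal userspace sketch, assuming the 4.15 amdgpu_drm.h from this series and libdrm's drmIoctl(); the render-node path and buffer sizes are only examples, and priorities above NORMAL may require CAP_SYS_NICE or DRM master.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>      /* drmIoctl() from libdrm */
#include <amdgpu_drm.h>   /* 4.15 UAPI: AMDGPU_CTX_PRIORITY_*, AMDGPU_GEM_CREATE_EXPLICIT_SYNC */

int main(void)
{
    int fd = open("/dev/dri/renderD128", O_RDWR);  /* example render node */
    if (fd < 0)
        return 1;

    /* Allocate a GPU context with an elevated scheduler priority. */
    union drm_amdgpu_ctx ctx;
    memset(&ctx, 0, sizeof(ctx));
    ctx.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
    ctx.in.priority = AMDGPU_CTX_PRIORITY_HIGH;
    if (drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx) == 0)
        printf("context %u created\n", ctx.out.alloc.ctx_id);
    else
        perror("ctx alloc");

    /* Create a buffer that opts out of implicit synchronization. */
    union drm_amdgpu_gem_create gem;
    memset(&gem, 0, sizeof(gem));
    gem.in.bo_size = 4096;
    gem.in.alignment = 4096;
    gem.in.domains = AMDGPU_GEM_DOMAIN_GTT;
    gem.in.domain_flags = AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
    if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &gem))
        perror("gem create");

    close(fd);
    return 0;
}

A buffer created with the flag is skipped by the kernel's implicit fencing, so userspace is expected to order access to it explicitly.
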
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/Makefile
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o

# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
37 changes: 31 additions & 6 deletions drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -732,10 +732,14 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter;
+ uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
- bool preamble_presented;
+ bool preamble_presented;
+ enum amd_sched_priority init_priority;
+ enum amd_sched_priority override_priority;
+ struct mutex lock;
};

struct amdgpu_ctx_mgr {
@@ -752,13 +756,18 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
+ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ enum amd_sched_priority priority);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);

+ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);


/*
* file private structure
*/
@@ -770,7 +779,6 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
- u32 vram_lost_counter;
};

/*
@@ -871,7 +879,7 @@ struct amdgpu_mec {
struct amdgpu_kiq {
u64 eop_gpu_addr;
struct amdgpu_bo *eop_obj;
- struct mutex ring_mutex;
+ spinlock_t ring_lock;
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
};
@@ -1035,6 +1043,10 @@ struct amdgpu_gfx {
bool in_suspend;
/* NGG */
struct amdgpu_ngg ngg;

+ /* pipe reservation */
+ struct mutex pipe_reserve_mutex;
+ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1113,6 +1125,7 @@ struct amdgpu_job {
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
+ uint32_t vram_lost_counter;

/* user fence handling */
uint64_t uf_addr;
@@ -1138,7 +1151,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
- #define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+ #define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -1378,6 +1391,18 @@ struct amdgpu_atcs {
struct amdgpu_atcs_functions functions;
};

+ /*
+ * Firmware VRAM reservation
+ */
+ struct amdgpu_fw_vram_usage {
+ u64 start_offset;
+ u64 size;
+ struct amdgpu_bo *reserved_bo;
+ void *va;
+ };
+
+ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
+
/*
* CGS
*/
@@ -1582,6 +1607,8 @@ struct amdgpu_device {
struct delayed_work late_init_work;

struct amdgpu_virt virt;
+ /* firmware VRAM reservation */
+ struct amdgpu_fw_vram_usage fw_vram_usage;

/* link all shadow bo */
struct list_head shadow_list;
@@ -1833,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

- bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
- struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
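
The amdgpu.h hunks above are the start of the vram-lost plumbing: the counter moves out of amdgpu_fpriv and is instead snapshotted into each context and each job, so work submitted before a VRAM-losing reset can be identified afterwards. A conceptual C sketch of that idea (not the driver's code; the types and helpers here are illustrative and use C11 atomics in place of the kernel's atomic_t):

#include <stdatomic.h>
#include <stdint.h>

struct device_state {
    atomic_uint vram_lost_counter;  /* bumped by a reset that wiped VRAM */
};

struct job {
    uint32_t vram_lost_counter;     /* snapshot taken when the job was queued */
};

static void job_submit(struct device_state *dev, struct job *job)
{
    job->vram_lost_counter = atomic_load(&dev->vram_lost_counter);
}

static int job_is_stale(struct device_state *dev, const struct job *job)
{
    /* VRAM was lost after this job was queued; its buffers may be gone. */
    return job->vram_lost_counter != atomic_load(&dev->vram_lost_counter);
}

static void gpu_reset_lost_vram(struct device_state *dev)
{
    atomic_fetch_add(&dev->vram_lost_counter, 1);
}

int main(void)
{
    struct device_state dev = { 0 };
    struct job j;

    job_submit(&dev, &j);           /* snapshot: 0 */
    gpu_reset_lost_vram(&dev);      /* a reset wipes VRAM: counter -> 1 */
    return job_is_stale(&dev, &j);  /* 1: this job must not run as-is */
}
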
18 changes: 17 additions & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1807,6 +1807,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
uint16_t data_offset;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+ u64 start_addr;
+ u64 size;

if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@@ -1815,7 +1817,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

- usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
+ size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware request VRAM reservation for SR-IOV */
+ adev->fw_vram_usage.start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->fw_vram_usage.size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
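
In the new path above, the top bits of ulStartAddrUsedByFirmware carry an operation flag; when it requests the SR-IOV shared reservation, the remaining bits give the reserved region's start in KiB and usFirmwareUseInKb gives its size in KiB, hence the two "<< 10" shifts to convert to bytes. Here is a standalone sketch of the same decode with made-up example values (the mask and shift mirror atombios.h; the SR-IOV block value is assumed here purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define ATOM_VRAM_OPERATION_FLAGS_MASK              0xC0000000UL  /* as in atombios.h */
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT             30            /* as in atombios.h */
#define ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION 0x2           /* value assumed for illustration */

int main(void)
{
    /* Example: flag in bits 31:30, start 0x2000 KiB, size 256 KiB. */
    uint64_t start_addr = ((uint64_t)ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION
                           << ATOM_VRAM_OPERATION_FLAGS_SHIFT) | 0x2000;
    uint64_t size_kb = 256;

    if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
        (uint32_t)((uint64_t)ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
                   ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
        /* Strip the flag bits, then convert KiB -> bytes. */
        uint64_t offset = (start_addr & ~(uint64_t)ATOM_VRAM_OPERATION_FLAGS_MASK) << 10;
        uint64_t size = size_kb << 10;

        /* Prints: reserve 262144 bytes at VRAM offset 0x800000 */
        printf("reserve %llu bytes at VRAM offset 0x%llx\n",
               (unsigned long long)size, (unsigned long long)offset);
    }
    return 0;
}
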
[Diff truncated here: the remaining 62 changed files are not shown.]
