drm/vmwgfx: Cleanup fifo mmio handling
Going forward the svga device might reuse mmio for general
register accesses; in order to prepare for that we need to
clean up our naming and handling of fifo-specific mmio reads
and writes. As part of this work let's switch to a managed
mapping of the fifo mmio to make the error handling cleaner.

Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Link: https://patchwork.freedesktop.org/patch/414045/?series=85516&rev=2
Zack Rusin committed Jan 14, 2021
1 parent 9703bb3 commit be4f77a
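
To make the "managed mapping" wording above concrete, here is a minimal sketch of the manual memremap() pattern the driver is leaving and the devres-managed devm_memremap() pattern it adopts. This is illustrative only, not vmwgfx code: the demo_probe_* helpers and the later_step_fails parameter are invented for the example, only the kernel APIs are real, and note that devm_memremap() reports failure with an ERR_PTR.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/types.h>

/* Manual mapping: later failures (and unload) must memunmap() by hand. */
static int demo_probe_manual(resource_size_t start, size_t size,
			     bool later_step_fails, void **out)
{
	void *fifo = memremap(start, size, MEMREMAP_WB);

	if (!fifo)
		return -ENOMEM;
	if (later_step_fails) {
		memunmap(fifo);		/* dedicated unwind step */
		return -ENOSYS;
	}
	*out = fifo;			/* unload must memunmap(*out) */
	return 0;
}

/* Managed mapping: devres unmaps when the device is unbound. */
static int demo_probe_managed(struct device *dev, resource_size_t start,
			      size_t size, bool later_step_fails, void **out)
{
	void *fifo = devm_memremap(dev, start, size, MEMREMAP_WB);

	if (IS_ERR(fifo))
		return PTR_ERR(fifo);	/* ERR_PTR, not NULL, on failure */
	*out = fifo;
	if (later_step_fails)
		return -ENOSYS;		/* no unwind needed for the map */
	return 0;
}

With the managed variant the mapping is released automatically when the device is unbound, which is why the dedicated out_err4 unwind label and the memunmap() calls disappear from vmw_driver_load() and vmw_driver_unload() in the diff below.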
Showing 7 changed files with 98 additions and 113 deletions.
37 changes: 18 additions & 19 deletions drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -683,7 +683,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, unsigned long chipset)

dev_priv->io_start = pci_resource_start(pdev, 0);
dev_priv->vram_start = pci_resource_start(pdev, 1);
dev_priv->mmio_start = pci_resource_start(pdev, 2);
dev_priv->fifo_mem_start = pci_resource_start(pdev, 2);

dev_priv->assume_16bpp = !!vmw_assume_16bpp;

@@ -713,7 +713,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, unsigned long chipset)
}

dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

@@ -797,19 +797,21 @@ static int vmw_driver_load(struct vmw_private *dev_priv, unsigned long chipset)
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
(unsigned)dev_priv->memory_size / 1024);
}
DRM_INFO("Maximum display memory size is %u kiB\n",
dev_priv->prim_bb_mem / 1024);
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);

dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
dev_priv->mmio_size, MEMREMAP_WB);

if (unlikely(dev_priv->mmio_virt == NULL)) {
DRM_INFO("Maximum display memory size is %llu kiB\n",
(uint64_t)dev_priv->prim_bb_mem / 1024);
DRM_INFO("VRAM at %pa size is %llu kiB\n",
&dev_priv->vram_start, (uint64_t)dev_priv->vram_size / 1024);
DRM_INFO("MMIO at %pa size is %llu kiB\n",
&dev_priv->fifo_mem_start, (uint64_t)dev_priv->fifo_mem_size / 1024);

dev_priv->fifo_mem = devm_memremap(dev_priv->drm.dev,
dev_priv->fifo_mem_start,
dev_priv->fifo_mem_size,
MEMREMAP_WB);

if (unlikely(dev_priv->fifo_mem == NULL)) {
ret = -ENOMEM;
DRM_ERROR("Failed mapping MMIO.\n");
DRM_ERROR("Failed mapping the FIFO MMIO.\n");
goto out_err0;
}

@@ -819,7 +821,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, unsigned long chipset)
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
goto out_err4;
goto out_err0;
}

dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
@@ -828,7 +830,7 @@
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
ret = -ENOMEM;
goto out_err4;
goto out_err0;
}

dev_priv->drm.dev_private = dev_priv;
@@ -988,8 +990,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, unsigned long chipset)
pci_release_regions(pdev);
out_no_device:
ttm_object_device_release(&dev_priv->tdev);
out_err4:
memunmap(dev_priv->mmio_virt);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
@@ -1037,7 +1037,6 @@ static void vmw_driver_unload(struct drm_device *dev)
pci_release_regions(pdev);

ttm_object_device_release(&dev_priv->tdev);
memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);

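A side note on the hunks above, offered as a hedged sketch: the start and size fields become resource_size_t, which is why the DRM_INFO format strings switch to %pa (printed through a pointer to the value) and to a 64-bit-safe %llu with an explicit cast. demo_print_region below is invented for illustration; the printing convention is the kernel's.

#include <linux/device.h>
#include <linux/types.h>

/* Illustrative only: print a resource_size_t region the way the hunk does. */
static void demo_print_region(struct device *dev, resource_size_t start,
			      resource_size_t size)
{
	/* %pa dereferences a pointer to a resource_size_t/phys_addr_t. */
	dev_info(dev, "region at %pa, size %llu kiB\n",
		 &start, (unsigned long long)size / 1024);
}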
31 changes: 16 additions & 15 deletions drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -496,12 +496,13 @@ struct vmw_private {

struct drm_vma_offset_manager vma_manager;
unsigned long vmw_chipset;
unsigned int io_start;
uint32_t vram_start;
uint32_t vram_size;
uint32_t prim_bb_mem;
uint32_t mmio_start;
uint32_t mmio_size;
resource_size_t io_start;
resource_size_t vram_start;
resource_size_t vram_size;
resource_size_t prim_bb_mem;
u32 *fifo_mem;
resource_size_t fifo_mem_start;
resource_size_t fifo_mem_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
uint32_t texture_max_width;
@@ -510,7 +511,6 @@
uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
u32 *mmio_virt;
uint32_t capabilities;
uint32_t capabilities2;
uint32_t max_gmr_ids;
@@ -1575,28 +1575,29 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
}

/**
* vmw_mmio_read - Perform a MMIO read from volatile memory
* vmw_fifo_mem_read - Perform a MMIO read from the fifo memory
*
* @addr: The address to read from
* @fifo_reg: The fifo register to read from
*
* This function is intended to be equivalent to ioread32() on
* memremap'd memory, but without byteswapping.
*/
static inline u32 vmw_mmio_read(u32 *addr)
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
return READ_ONCE(*addr);
return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

/**
* vmw_mmio_write - Perform a MMIO write to volatile memory
* vmw_fifo_mem_write - Perform a MMIO write to volatile memory
*
* @addr: The address to write to
* @fifo_reg: The fifo register to write to
*
* This function is intended to be equivalent to iowrite32 on
* memremap'd memory, but without byteswapping.
*/
static inline void vmw_mmio_write(u32 value, u32 *addr)
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
u32 value)
{
WRITE_ONCE(*addr, value);
WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
#endif
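
For illustration, a small sketch of how a caller uses the new accessors instead of dereferencing the raw mapping. demo_read_fence_and_set_goal is made up for the example; the helpers and register names are the ones from this header and the svga headers it pulls in.

#include "vmwgfx_drv.h"

/*
 * Illustrative only, not part of the patch: callers now pass the fifo
 * register index and never touch dev_priv->fifo_mem (the old mmio_virt)
 * directly.
 */
static u32 demo_read_fence_and_set_goal(struct vmw_private *dev_priv, u32 goal)
{
	u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);

	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE_GOAL, goal);
	return seqno;
}

The vmwgfx_fence.c hunks below perform exactly this substitution in the driver itself.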
24 changes: 10 additions & 14 deletions drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -141,8 +141,7 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;

u32 *fifo_mem = dev_priv->mmio_virt;
u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;

@@ -401,23 +400,22 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
u32 *fifo_mem;
struct vmw_fence_obj *fence;

if (likely(!fman->seqno_valid))
return false;

fifo_mem = fman->dev_priv->mmio_virt;
goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;

fman->seqno_valid = false;
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
vmw_mmio_write(fence->base.seqno,
fifo_mem + SVGA_FIFO_FENCE_GOAL);
vmw_fifo_mem_write(fman->dev_priv,
SVGA_FIFO_FENCE_GOAL,
fence->base.seqno);
break;
}
}
@@ -445,18 +443,17 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
u32 *fifo_mem;

if (dma_fence_is_signaled_locked(&fence->base))
return false;

fifo_mem = fman->dev_priv->mmio_virt;
goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;

vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
fence->base.seqno);
fman->seqno_valid = true;

return true;
@@ -468,9 +465,8 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
u32 *fifo_mem = fman->dev_priv->mmio_virt;

seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -492,7 +488,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)

needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;