Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next

Changes for vmwgfx for 4.4. If there is time, I'll follow up with a series
to move to threaded irqs.

* 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Replace iowrite/ioread with volatile memory accesses
  drm/vmwgfx: Turn off support for multisample count != 0 v2
  drm/vmwgfx: switch from ioremap_cache to memremap
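
For orientation, a minimal sketch of the pattern the first and last patches in the list above describe: the FIFO is mapped with memremap()/MEMREMAP_WB instead of ioremap_cache(), and accessed with plain volatile loads and stores (READ_ONCE()/WRITE_ONCE()) instead of ioread32()/iowrite32(). This is illustrative only; the dummy_* names and struct are placeholders, not vmwgfx code (the real changes are in the diffs below).

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <linux/errno.h>
#include <linux/io.h>		/* memremap(), memunmap(), MEMREMAP_WB */
#include <linux/types.h>

/* Placeholder device state; the driver itself keeps this in struct vmw_private. */
struct dummy_dev {
	resource_size_t mmio_start;
	resource_size_t mmio_size;
	u32 *mmio_virt;		/* no __iomem annotation once memremap() is used */
};

static int dummy_map_fifo(struct dummy_dev *dev)
{
	/* Write-back cacheable mapping of the RAM-backed FIFO. */
	dev->mmio_virt = memremap(dev->mmio_start, dev->mmio_size, MEMREMAP_WB);
	return dev->mmio_virt ? 0 : -ENOMEM;
}

static u32 dummy_read32(struct dummy_dev *dev, unsigned int reg)
{
	/* Volatile load; unlike ioread32(), no byteswapping is implied. */
	return READ_ONCE(*(dev->mmio_virt + reg));
}

static void dummy_write32(struct dummy_dev *dev, unsigned int reg, u32 val)
{
	WRITE_ONCE(*(dev->mmio_virt + reg), val);
}

static void dummy_unmap_fifo(struct dummy_dev *dev)
{
	memunmap(dev->mmio_virt);	/* pairs with memremap(), not iounmap() */
}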
Dave Airlie committed Nov 2, 2015
2 parents a76edb8 + b76ff5e commit c0f3f90
Showing 8 changed files with 140 additions and 101 deletions.
8 changes: 4 additions & 4 deletions drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,8 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;

- dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
-                                     dev_priv->mmio_size);
+ dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
+                                dev_priv->mmio_size, MEMREMAP_WB);

if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
@@ -907,7 +907,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
out_no_device:
ttm_object_device_release(&dev_priv->tdev);
out_err4:
- iounmap(dev_priv->mmio_virt);
+ memunmap(dev_priv->mmio_virt);
out_err3:
vmw_ttm_global_release(dev_priv);
out_err0:
@@ -958,7 +958,7 @@ static int vmw_driver_unload(struct drm_device *dev)
pci_release_regions(dev->pdev);

ttm_object_device_release(&dev_priv->tdev);
- iounmap(dev_priv->mmio_virt);
+ memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
28 changes: 27 additions & 1 deletion drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -375,7 +375,7 @@ struct vmw_private {
uint32_t stdu_max_height;
uint32_t initial_width;
uint32_t initial_height;
- u32 __iomem *mmio_virt;
+ u32 *mmio_virt;
uint32_t capabilities;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
@@ -1206,4 +1206,30 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
atomic_dec(&dev_priv->num_fifo_resources);
}

+ /**
+  * vmw_mmio_read - Perform a MMIO read from volatile memory
+  *
+  * @addr: The address to read from
+  *
+  * This function is intended to be equivalent to ioread32() on
+  * memremap'd memory, but without byteswapping.
+  */
+ static inline u32 vmw_mmio_read(u32 *addr)
+ {
+ 	return READ_ONCE(*addr);
+ }
+
+ /**
+  * vmw_mmio_write - Perform a MMIO write to volatile memory
+  *
+  * @addr: The address to write to
+  *
+  * This function is intended to be equivalent to iowrite32 on
+  * memremap'd memory, but without byteswapping.
+  */
+ static inline void vmw_mmio_write(u32 value, u32 *addr)
+ {
+ 	WRITE_ONCE(*addr, value);
+ }
#endif
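
As a brief usage illustration (mirroring the vmwgfx_fence.c hunks below, and assuming the vmwgfx_drv.h declarations above), the new helpers treat the memremap()'d FIFO as ordinary memory. The dummy_* wrappers here are hypothetical; only the field and register names come from the driver.

/* Hypothetical wrappers; the real call sites are in vmwgfx_fence.c. */
static u32 dummy_current_fence_seqno(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	/* Plain volatile load from the cached FIFO mapping. */
	return vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
}

static void dummy_set_fence_goal(struct vmw_private *dev_priv, u32 seqno)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	vmw_mmio_write(seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
}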
24 changes: 12 additions & 12 deletions drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -142,8 +142,8 @@ static bool vmw_fence_enable_signaling(struct fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;

- u32 __iomem *fifo_mem = dev_priv->mmio_virt;
- u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ u32 *fifo_mem = dev_priv->mmio_virt;
+ u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;

@@ -386,23 +386,23 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- u32 __iomem *fifo_mem;
+ u32 *fifo_mem;
struct vmw_fence_obj *fence;

if (likely(!fman->seqno_valid))
return false;

fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
return false;

fman->seqno_valid = false;
list_for_each_entry(fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
- iowrite32(fence->base.seqno,
-           fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_mmio_write(fence->base.seqno,
+                fifo_mem + SVGA_FIFO_FENCE_GOAL);
break;
}
}
@@ -430,18 +430,18 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
struct vmw_fence_manager *fman = fman_from_fence(fence);
u32 goal_seqno;
- u32 __iomem *fifo_mem;
+ u32 *fifo_mem;

if (fence_is_signaled_locked(&fence->base))
return false;

fifo_mem = fman->dev_priv->mmio_virt;
- goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
if (likely(fman->seqno_valid &&
goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
return false;

- iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+ vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
fman->seqno_valid = true;

return true;
@@ -453,9 +453,9 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
struct list_head action_list;
bool needs_rerun;
uint32_t seqno, new_seqno;
- u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+ u32 *fifo_mem = fman->dev_priv->mmio_virt;

- seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -477,7 +477,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)

needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
- new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+ new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
(Diffs for the remaining five changed files are not shown.)
