drm/i915: Use non-atomic kmap for slow copy paths
As we are not required to be atomic, and so need not avoid sleeping,
whilst performing the slow copy for shmem-based pread and pwrite, we can
use kmap instead, thus simplifying the code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
Chris Wilson authored and Eric Anholt committed May 28, 2010
1 parent 9b8c4a0 commit 99a03df
Showing 1 changed file with 30 additions and 52 deletions.
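
For context, the sketch below shows the kmap()-based pattern the patch moves to; it is an illustration only (the function name and the include list are mine, not taken from the driver). kmap() may sleep, which is acceptable because these slow shmem pread/pwrite paths run in ordinary process context, and it needs neither the KM_USER* slot arguments nor the failure handling that the kmap_atomic()-based code carried, so the copy helpers can simply return void.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Illustrative sketch, not the driver's code: copy 'length' bytes between
 * two pages using kmap().  The mapping may sleep, so this is only valid in
 * process context, but it always yields a usable address, so there is no
 * error path to propagate to callers.
 */
static void example_copy_page_to_page(struct page *dst_page, int dst_offset,
                                      struct page *src_page, int src_offset,
                                      int length)
{
        char *dst_vaddr = kmap(dst_page);
        char *src_vaddr = kmap(src_page);

        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

        kunmap(src_page);
        kunmap(dst_page);
}

The same shape appears in the patch as the reworked slow_shmem_copy() and slow_shmem_bit17_copy() below.
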
drivers/gpu/drm/i915/i915_gem.c: 30 additions & 52 deletions
@@ -167,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
                 obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
                 int dst_offset,
                 struct page *src_page,
@@ -176,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
 {
         char *dst_vaddr, *src_vaddr;
 
-        dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-        if (dst_vaddr == NULL)
-                return -ENOMEM;
-
-        src_vaddr = kmap_atomic(src_page, KM_USER1);
-        if (src_vaddr == NULL) {
-                kunmap_atomic(dst_vaddr, KM_USER0);
-                return -ENOMEM;
-        }
+        dst_vaddr = kmap(dst_page);
+        src_vaddr = kmap(src_page);
 
         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-        kunmap_atomic(src_vaddr, KM_USER1);
-        kunmap_atomic(dst_vaddr, KM_USER0);
-
-        return 0;
+        kunmap(src_page);
+        kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
                       int gpu_offset,
                       struct page *cpu_page,
@@ -214,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                                        cpu_page, cpu_offset, length);
         }
 
-        gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-        if (gpu_vaddr == NULL)
-                return -ENOMEM;
-
-        cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-        if (cpu_vaddr == NULL) {
-                kunmap_atomic(gpu_vaddr, KM_USER0);
-                return -ENOMEM;
-        }
+        gpu_vaddr = kmap(gpu_page);
+        cpu_vaddr = kmap(cpu_page);
 
         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
          * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -246,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
                 length -= this_length;
         }
 
-        kunmap_atomic(cpu_vaddr, KM_USER1);
-        kunmap_atomic(gpu_vaddr, KM_USER0);
-
-        return 0;
+        kunmap(cpu_page);
+        kunmap(gpu_page);
 }
 
 /**
@@ -425,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         page_length = PAGE_SIZE - data_page_offset;
 
                 if (do_bit17_swizzling) {
-                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                    shmem_page_offset,
-                                                    user_pages[data_page_index],
-                                                    data_page_offset,
-                                                    page_length,
-                                                    1);
-                } else {
-                        ret = slow_shmem_copy(user_pages[data_page_index],
-                                              data_page_offset,
-                                              obj_priv->pages[shmem_page_index],
-                                              shmem_page_offset,
-                                              page_length);
+                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              user_pages[data_page_index],
+                                              data_page_offset,
+                                              page_length,
+                                              1);
+                } else {
+                        slow_shmem_copy(user_pages[data_page_index],
+                                        data_page_offset,
+                                        obj_priv->pages[shmem_page_index],
+                                        shmem_page_offset,
+                                        page_length);
                 }
-                if (ret)
-                        goto fail_put_pages;
 
                 remain -= page_length;
                 data_ptr += page_length;
@@ -900,21 +880,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         page_length = PAGE_SIZE - data_page_offset;
 
                 if (do_bit17_swizzling) {
-                        ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-                                                    shmem_page_offset,
-                                                    user_pages[data_page_index],
-                                                    data_page_offset,
-                                                    page_length,
-                                                    0);
-                } else {
-                        ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-                                              shmem_page_offset,
-                                              user_pages[data_page_index],
-                                              data_page_offset,
-                                              page_length);
+                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                                              shmem_page_offset,
+                                              user_pages[data_page_index],
+                                              data_page_offset,
+                                              page_length,
+                                              0);
+                } else {
+                        slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                                        shmem_page_offset,
+                                        user_pages[data_page_index],
+                                        data_page_offset,
+                                        page_length);
                 }
-                if (ret)
-                        goto fail_put_pages;
 
                 remain -= page_length;
                 data_ptr += page_length;
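For comparison, here is the pattern being removed, reassembled from the deleted lines above into one self-contained sketch (only the function name is invented; the body mirrors the old slow_shmem_copy()). Each mapping uses a dedicated per-CPU KM_USER* slot, no sleeping is allowed while the mappings are held, and the NULL checks are what forced the int return and the ret/goto fail_put_pages handling at the pread/pwrite call sites.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Reassembled from the deleted lines above (function name invented):
 * the kmap_atomic()-based copy that the patch replaces.  The mappings
 * use the per-CPU KM_USER0/KM_USER1 slots, the code must not sleep
 * until they are released, and a NULL mapping is reported as -ENOMEM
 * to the caller.
 */
static int example_copy_page_to_page_atomic(struct page *dst_page, int dst_offset,
                                            struct page *src_page, int src_offset,
                                            int length)
{
        char *dst_vaddr, *src_vaddr;

        dst_vaddr = kmap_atomic(dst_page, KM_USER0);
        if (dst_vaddr == NULL)
                return -ENOMEM;

        src_vaddr = kmap_atomic(src_page, KM_USER1);
        if (src_vaddr == NULL) {
                kunmap_atomic(dst_vaddr, KM_USER0);
                return -ENOMEM;
        }

        memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

        kunmap_atomic(src_vaddr, KM_USER1);
        kunmap_atomic(dst_vaddr, KM_USER0);

        return 0;
}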
