Commit

---
r: 199680
b: refs/heads/master
c: afa3b60
h: refs/heads/master
v: v3
Dave Airlie committed Jun 1, 2010
1 parent 813dfd2 commit 4a43dfe
Showing 13 changed files with 471 additions and 177 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: fbf81762e385d3d45acad057b654d56972acf58c
refs/heads/master: afa3b60c905f606e8245115474d77787035e02eb
64 changes: 37 additions & 27 deletions trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
/**
* Limits for the pool. They are handled without locks because only place where
* they may change is in sysfs store. They won't have immediate effect anyway
* so forcing serialiazation to access them is pointless.
* so forcing serialization to access them is pointless.
*/

struct ttm_pool_opts {
@@ -165,16 +165,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
printk(KERN_ERR "[ttm] Setting allocation size to %lu "
"is not allowed. Recomended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
printk(KERN_ERR TTM_PFX
"Setting allocation size to %lu "
"is not allowed. Recommended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
printk(KERN_WARNING "[ttm] Setting allocation size to "
"larger than %lu is not recomended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
printk(KERN_WARNING TTM_PFX
"Setting allocation size to "
"larger than %lu is not recommended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@@ -277,7 +279,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
@@ -313,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
printk(KERN_ERR TTM_PFX
"Failed to allocate memory for pool free operation.\n");
return 0;
}

@@ -390,7 +393,7 @@ static int ttm_pool_get_num_unused_pages(void)
}

/**
* Calback for mm to request pool to reduce number of page held.
* Callback for mm to request pool to reduce number of page held.
*/
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
@@ -433,14 +436,16 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
cpages);
printk(KERN_ERR TTM_PFX
"Failed to set %d pages to uc!\n",
cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
cpages);
printk(KERN_ERR TTM_PFX
"Failed to set %d pages to wc!\n",
cpages);
break;
default:
break;
@@ -458,7 +463,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
struct page **failed_pages, unsigned cpages)
{
unsigned i;
/* Failed pages has to be reed */
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
list_del(&failed_pages[i]->lru);
__free_page(failed_pages[i]);
@@ -485,20 +490,22 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

if (!caching_array) {
printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
printk(KERN_ERR TTM_PFX
"Unable to allocate table for new pages.");
return -ENOMEM;
}

for (i = 0, cpages = 0; i < count; ++i) {
p = alloc_page(gfp_flags);

if (!p) {
printk(KERN_ERR "[ttm] unable to get page %u\n", i);
printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
r = ttm_set_pages_caching(caching_array, cstate, cpages);
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
@@ -590,7 +597,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
printk(KERN_ERR TTM_PFX
"Failed to fill pool (%p).", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
@@ -671,13 +679,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= __GFP_HIGHMEM;
gfp_flags |= GFP_HIGHUSER;

for (r = 0; r < count; ++r) {
p = alloc_page(gfp_flags);
if (!p) {

printk(KERN_ERR "[ttm] unable to allocate page.");
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}

@@ -709,8 +718,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
if (r) {
/* If there is any pages in the list put them back to
* the pool. */
printk(KERN_ERR "[ttm] Failed to allocate extra pages "
"for large request.");
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
ttm_put_pages(pages, 0, flags, cstate);
return r;
}
@@ -778,7 +788,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
return 0;

printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");

@@ -813,7 +823,7 @@ void ttm_page_alloc_fini()
if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
return;

printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
ttm_pool_mm_shrink_fini(&_manager);

for (i = 0; i < NUM_POOLS; ++i)
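
The ttm_page_alloc.c hunks above are largely mechanical: every printk that used to open-code an "[ttm]" tag now goes through the driver-wide TTM_PFX prefix, long format strings are re-wrapped, and several spelling mistakes in the messages are corrected. TTM_PFX itself is not part of this diff; a minimal sketch, assuming it lives in a shared TTM header and expands to a short tag, shows the pattern:

#include <linux/kernel.h>

/* Sketch only: TTM_PFX is defined outside these hunks (assumed to live in a
 * shared TTM header) and is expected to expand to a short tag such as
 * "[TTM] ". */
#define TTM_PFX "[TTM] "

static void example_report(int npages)
{
        /* Adjacent string literals concatenate, so the tag and the message
         * become one prefixed format string. */
        printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n", npages);
}

This keeps every pool-allocator message identifiable in dmesg without repeating the tag by hand in each call site.
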
2 changes: 1 addition & 1 deletion trunk/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o
vmwgfx_overlay.o vmwgfx_fence.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
17 changes: 9 additions & 8 deletions trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -318,6 +318,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err3;
}

/* Need mmio memory to check for fifo pitchlock cap. */
if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
!(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n");
goto out_err4;
}

dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12);

@@ -399,8 +408,6 @@ static int vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);

DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");

unregister_pm_notifier(&dev_priv->pm_nb);

vmw_fb_close(dev_priv);
@@ -546,7 +553,6 @@ static int vmw_master_create(struct drm_device *dev,
{
struct vmw_master *vmaster;

DRM_INFO("Master create.\n");
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
if (unlikely(vmaster == NULL))
return -ENOMEM;
@@ -563,7 +569,6 @@ static void vmw_master_destroy(struct drm_device *dev,
{
struct vmw_master *vmaster = vmw_master(master);

DRM_INFO("Master destroy.\n");
master->driver_priv = NULL;
kfree(vmaster);
}
@@ -579,8 +584,6 @@ static int vmw_master_set(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;

DRM_INFO("Master set.\n");

if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +625,6 @@ static void vmw_master_drop(struct drm_device *dev,
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;

DRM_INFO("Master drop.\n");

/**
* Make sure the master doesn't disappear while we have
* it locked.
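
The new block in vmw_driver_load() makes the driver bail out with -ENOSYS when the device advertises neither SVGA_CAP_DISPLAY_TOPOLOGY nor SVGA_CAP_PITCHLOCK and the FIFO offers no pitchlock either. The helper vmw_fifo_have_pitchlock() is only declared in this series (see the vmwgfx_drv.h hunk below); its body lives in vmwgfx_fifo.c, which is not among the files rendered here. A plausible sketch, assuming the usual pattern of reading the FIFO capability word from MMIO (dev_priv->mmio_virt, SVGA_CAP_EXTENDED_FIFO, SVGA_FIFO_CAPABILITIES and SVGA_FIFO_CAP_PITCHLOCK come from the surrounding driver and SVGA headers, not from this diff):

/* Illustrative sketch only -- the real implementation is in vmwgfx_fifo.c. */
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t caps;

        /* Without an extended FIFO there is no capability word to read. */
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;

        caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
        return (caps & SVGA_FIFO_CAP_PITCHLOCK) != 0;
}
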
32 changes: 31 additions & 1 deletion trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,7 +41,7 @@

#define VMWGFX_DRIVER_DATE "20100209"
#define VMWGFX_DRIVER_MAJOR 1
#define VMWGFX_DRIVER_MINOR 0
#define VMWGFX_DRIVER_MINOR 1
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -102,6 +102,13 @@ struct vmw_surface {
struct vmw_cursor_snooper snooper;
};

struct vmw_fence_queue {
struct list_head head;
struct timespec lag;
struct timespec lag_time;
spinlock_t lock;
};

struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
@@ -115,6 +122,7 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
struct vmw_fence_queue fence_queue;
};

struct vmw_relocation {
@@ -179,6 +187,7 @@ struct vmw_private {
uint32_t vga_red_mask;
uint32_t vga_blue_mask;
uint32_t vga_green_mask;
uint32_t vga_pitchlock;

/*
* Framebuffer info.
@@ -393,6 +402,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);

/**
* TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +451,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t sequence,
bool interruptible,
unsigned long timeout);
extern void vmw_update_sequence(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo_state);


/**
* Rudimentary fence objects currently used only for throttling -
* vmwgfx_fence.c
*/

extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
extern int vmw_fence_push(struct vmw_fence_queue *queue,
uint32_t sequence);
extern int vmw_fence_pull(struct vmw_fence_queue *queue,
uint32_t signaled_sequence);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
struct vmw_fence_queue *queue, uint32_t us);

/**
* Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +493,9 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
void vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bbp, unsigned depth);

/**
* Overlay control - vmwgfx_overlay.c
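
The header hunks introduce a small fence queue (struct vmw_fence_queue plus the vmw_fence_push()/vmw_fence_pull()/vmw_wait_lag() prototypes) that the FIFO uses purely for throttling; the implementation is the new vmwgfx_fence.c added by the Makefile change above, which is not rendered in this excerpt. A minimal sketch of the producer side, assuming a per-entry record that pairs a sequence number with a submission timestamp (the struct vmw_fence name and fields are illustrative, not taken from the diff):

/* Hypothetical per-entry record; only struct vmw_fence_queue and the
 * prototypes in the header hunk above are confirmed by this diff. */
struct vmw_fence {
        struct list_head head;
        uint32_t sequence;
        struct timespec submitted;
};

int vmw_fence_push(struct vmw_fence_queue *queue, uint32_t sequence)
{
        struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);

        if (unlikely(fence == NULL))
                return -ENOMEM;

        /* Remember when this sequence entered the FIFO. */
        fence->sequence = sequence;
        getnstimeofday(&fence->submitted);

        spin_lock(&queue->lock);
        list_add_tail(&fence->head, &queue->head);
        spin_unlock(&queue->lock);

        return 0;
}

vmw_fence_pull() would then retire entries up to the signaled sequence, folding the elapsed time into queue->lag and queue->lag_time so the driver can tell how far the CPU is running ahead of the GPU.
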
9 changes: 9 additions & 0 deletions trunk/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto out_err;

vmw_apply_relocations(sw_context);

if (arg->throttle_us) {
ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
arg->throttle_us);

if (unlikely(ret != 0))
goto out_err;
}

vmw_fifo_commit(dev_priv, arg->command_size);

ret = vmw_fifo_send_fence(dev_priv, &sequence);
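
The execbuf hunk wires the throttle in: when userspace passes a non-zero throttle_us, the ioctl waits via vmw_wait_lag() after applying relocations and before committing the FIFO and emitting the fence for this submission. The wait itself is not visible here; a conceptual sketch, reusing the hypothetical struct vmw_fence from the previous sketch and treating vmw_lag_lt() and vmw_wait_fence() as assumed helpers, might look like:

/* Conceptual sketch only; the real vmw_wait_lag() lives in vmwgfx_fence.c.
 * vmw_lag_lt() and vmw_wait_fence() are assumed helpers, not confirmed by
 * the diff. */
int vmw_wait_lag(struct vmw_private *dev_priv,
                 struct vmw_fence_queue *queue, uint32_t us)
{
        struct vmw_fence *fence;
        uint32_t sequence;
        int ret;

        /* Retire fences until the queue lags the GPU by less than 'us'. */
        while (!vmw_lag_lt(queue, us)) {
                spin_lock(&queue->lock);
                if (list_empty(&queue->head)) {
                        spin_unlock(&queue->lock);
                        return 0;
                }
                fence = list_first_entry(&queue->head, struct vmw_fence, head);
                sequence = fence->sequence;
                spin_unlock(&queue->lock);

                /* Sleep until the hardware signals the oldest sequence. */
                ret = vmw_wait_fence(dev_priv, false, sequence, true, 3 * HZ);
                if (unlikely(ret != 0))
                        return ret;

                (void) vmw_fence_pull(queue, sequence);
        }
        return 0;
}
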