diff --git a/[refs] b/[refs]
index a806ee36014e..927dbda08848 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4abe4389790d5f02569fbacdf035536ba84c7d44
+refs/heads/master: 7e71f8a59e1c9adbbc3b737b4b818c8aa4169d0e
diff --git a/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c b/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ef910694bd63..0d9a42c2394f 100644
--- a/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/trunk/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -77,7 +77,7 @@ struct ttm_page_pool {
 /**
  * Limits for the pool. They are handled without locks because only place where
  * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialization to access them is pointless.
+ * so forcing serialization to access them is pointless.
  */
 struct ttm_pool_opts {
@@ -165,18 +165,16 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR TTM_PFX
-			       "Setting allocation size to %lu "
-			       "is not allowed. Recommended size is "
-			       "%lu\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+			       "is not allowed. Recommended size is "
+			       "%lu\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING TTM_PFX
-			       "Setting allocation size to "
-			       "larger than %lu is not recommended.\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			printk(KERN_WARNING "[ttm] Setting allocation size to "
+			       "larger than %lu is not recommended.\n",
+			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
@@ -279,7 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
 	unsigned i;
 	if (set_pages_array_wb(pages, npages))
-		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
+		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
 		       npages);
 	for (i = 0; i < npages; ++i)
 		__free_page(pages[i]);
@@ -315,8 +313,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		printk(KERN_ERR TTM_PFX
-		       "Failed to allocate memory for pool free operation.\n");
+		printk(KERN_ERR "[ttm] Failed to allocate memory for pool free operation.\n");
 		return 0;
 	}
@@ -393,7 +390,7 @@ static int ttm_pool_get_num_unused_pages(void)
 }
 
 /**
- * Callback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce number of pages held.
  */
 static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
 {
@@ -436,16 +433,14 @@ static int ttm_set_pages_caching(struct page **pages,
 	case tt_uncached:
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			printk(KERN_ERR TTM_PFX
-			       "Failed to set %d pages to uc!\n",
-			       cpages);
+			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+			       cpages);
 		break;
 	case tt_wc:
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			printk(KERN_ERR TTM_PFX
-			       "Failed to set %d pages to wc!\n",
-			       cpages);
+			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+			       cpages);
 		break;
 	default:
 		break;
@@ -463,7 +458,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
 		struct page **failed_pages, unsigned cpages)
 {
 	unsigned i;
-	/* Failed pages have to be freed */
+	/* Failed pages have to be freed */
 	for (i = 0; i < cpages; ++i) {
 		list_del(&failed_pages[i]->lru);
 		__free_page(failed_pages[i]);
@@ -490,8 +485,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *),
 			GFP_KERNEL);
 	if (!caching_array) {
-		printk(KERN_ERR TTM_PFX
-		       "Unable to allocate table for new pages.");
+		printk(KERN_ERR "[ttm] Unable to allocate table for new pages.");
 		return -ENOMEM;
 	}
@@ -499,13 +493,12 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
 		p = alloc_page(gfp_flags);
 
 		if (!p) {
-			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
+			printk(KERN_ERR "[ttm] Unable to get page %u.\n", i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
 			if (cpages) {
-				r = ttm_set_pages_caching(caching_array,
-							  cstate, cpages);
+				r = ttm_set_pages_caching(caching_array, cstate, cpages);
 				if (r)
 					ttm_handle_caching_state_failure(pages,
 							ttm_flags, cstate,
@@ -597,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
-			printk(KERN_ERR TTM_PFX
-			       "Failed to fill pool (%p).", pool);
+			printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
 			/* If we have any pages left put them to the pool. */
 			list_for_each_entry(p, &pool->list, lru) {
 				++cpages;
@@ -679,14 +671,13 @@ int ttm_get_pages(struct list_head *pages, int flags,
 	if (flags & TTM_PAGE_FLAG_DMA32)
 		gfp_flags |= GFP_DMA32;
 	else
-		gfp_flags |= GFP_HIGHUSER;
+		gfp_flags |= __GFP_HIGHMEM;
 
 	for (r = 0; r < count; ++r) {
 		p = alloc_page(gfp_flags);
 		if (!p) {
-			printk(KERN_ERR TTM_PFX
-			       "Unable to allocate page.");
+			printk(KERN_ERR "[ttm] Unable to allocate page.");
 			return -ENOMEM;
 		}
@@ -718,9 +709,8 @@ int ttm_get_pages(struct list_head *pages, int flags,
 		if (r) {
 			/* If there is any pages in the list put them back to
 			 * the pool.
 			 */
-			printk(KERN_ERR TTM_PFX
-			       "Failed to allocate extra pages "
-			       "for large request.");
+			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+			       "for large request.");
 			ttm_put_pages(pages, 0, flags, cstate);
 			return r;
 		}
@@ -788,7 +778,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
 		return 0;
 
-	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
+	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
 
 	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
 
@@ -823,7 +813,7 @@ void ttm_page_alloc_fini()
 	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
 		return;
 
-	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
+	printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
 
 	ttm_pool_mm_shrink_fini(&_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbc7c4c30bc7..642bcd7a1500 100644
--- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -765,8 +765,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 	dev->mode_config.funcs = &vmw_kms_funcs;
 	dev->mode_config.min_width = 1;
 	dev->mode_config.min_height = 1;
-	dev->mode_config.max_width = dev_priv->fb_max_width;
-	dev->mode_config.max_height = dev_priv->fb_max_height;
+	/* assumed largest fb size */
+	dev->mode_config.max_width = 8192;
+	dev->mode_config.max_height = 8192;
 
 	ret = vmw_kms_init_legacy_display_system(dev_priv);
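
Note on the gfp flag change in ttm_get_pages(): GFP_HIGHUSER is a composite
allocation policy, while __GFP_HIGHMEM is a single modifier bit, so the old
and new branches are only equivalent if gfp_flags already carries the
GFP_USER policy bits when this |= runs. A minimal sketch of the relevant
definitions, based on include/linux/gfp.h of this kernel era and quoted from
memory (treat the exact bit set as an assumption, not the authoritative
header):

	/* include/linux/gfp.h (2.6.3x era, approximate) */
	#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
	#define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)

	/*
	 * gfp_flags |= GFP_HIGHUSER;  -- adds the reclaim/IO/FS/hardwall
	 *                                policy bits plus the highmem hint.
	 * gfp_flags |= __GFP_HIGHMEM; -- adds only the highmem hint; the
	 *                                base policy must already be present
	 *                                in gfp_flags (e.g. from GFP_USER).
	 */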