Merge branch 'for-next' into for-linus
Al Viro committed Sep 12, 2013
2 parents bcceeeb + f5e1dd3 commit bf2ba3b
Showing 54 changed files with 1,755 additions and 1,129 deletions.
25 changes: 18 additions & 7 deletions arch/x86/kvm/mmu.c
@@ -4421,13 +4421,12 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
}
}

static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct kvm *kvm;
int nr_to_scan = sc->nr_to_scan;

if (nr_to_scan == 0)
goto out;
unsigned long freed = 0;

raw_spin_lock(&kvm_lock);

@@ -4462,25 +4461,37 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
goto unlock;
}

prepare_zap_oldest_mmu_page(kvm, &invalid_list);
if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
freed++;
kvm_mmu_commit_zap_page(kvm, &invalid_list);

unlock:
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);

/*
* unfair on small ones
* per-vm shrinkers cry out
* sadness comes quickly
*/
list_move_tail(&kvm->vm_list, &vm_list);
break;
}

raw_spin_unlock(&kvm_lock);
return freed;

out:
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
.shrink = mmu_shrink,
.count_objects = mmu_shrink_count,
.scan_objects = mmu_shrink_scan,
.seeks = DEFAULT_SEEKS * 10,
};
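
The hunk above follows the pattern used throughout this series: the single .shrink callback is split into a .count_objects/.scan_objects pair. Below is a minimal sketch of the converted shape; the names (foo_count, foo_scan, foo_nr_cached) are hypothetical and not part of this commit. count_objects returns a cheap estimate of reclaimable objects, and scan_objects frees up to sc->nr_to_scan of them, returning the number actually freed, or SHRINK_STOP when it cannot make progress.

/* Minimal sketch of the post-conversion shrinker shape; hypothetical names. */
#include <linux/shrinker.h>

static unsigned long foo_nr_cached;	/* stand-in for a real object count */

static unsigned long foo_count(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	/* Cheap, approximate count of objects that could be reclaimed. */
	return foo_nr_cached;
}

static unsigned long foo_scan(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	unsigned long freed = 0;

	while (freed < sc->nr_to_scan && foo_nr_cached) {
		foo_nr_cached--;	/* stand-in for freeing one object */
		freed++;
	}

	/*
	 * Return the number of objects actually freed; return SHRINK_STOP
	 * instead when reclaim cannot make progress (e.g. a required lock
	 * is unavailable), as several callbacks in this commit do.
	 */
	return freed;
}

static struct shrinker foo_shrinker = {
	.count_objects	= foo_count,
	.scan_objects	= foo_scan,
	.seeks		= DEFAULT_SEEKS,
};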

4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/i915_dma.c
@@ -1667,7 +1667,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;

out_gem_unload:
if (dev_priv->mm.inactive_shrinker.shrink)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);

if (dev->pdev->msi_enabled)
@@ -1706,7 +1706,7 @@ int i915_driver_unload(struct drm_device *dev)

i915_teardown_sysfs(dev);

if (dev_priv->mm.inactive_shrinker.shrink)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);

mutex_lock(&dev->struct_mutex);
82 changes: 57 additions & 25 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1736,16 +1738,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return __i915_gem_shrink(dev_priv, target, true);
}

static void
static long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
long freed = 0;

i915_gem_evict_everything(dev_priv->dev);

list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
global_list)
global_list) {
if (obj->pages_pin_count == 0)
freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put_pages(obj);
}
return freed;
}

static int
@@ -4526,7 +4533,8 @@ i915_gem_load(struct drm_device *dev)

dev_priv->mm.interruptible = true;

dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
@@ -4749,54 +4757,44 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}

static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
int cnt;
unsigned long count;

if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return 0;
return SHRINK_STOP;

if (dev_priv->mm.shrinker_no_lock_stealing)
return 0;
return SHRINK_STOP;

unlock = false;
}

if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
false);
if (nr_to_scan > 0)
i915_gem_shrink_all(dev_priv);
}

cnt = 0;
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
count += obj->base.size >> PAGE_SHIFT;

list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;

if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
count += obj->base.size >> PAGE_SHIFT;
}

if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
return count;
}

/* All the new VM stuff */
@@ -4860,6 +4858,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
return 0;
}

static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
int nr_to_scan = sc->nr_to_scan;
unsigned long freed;
bool unlock = true;

if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return 0;

if (dev_priv->mm.shrinker_no_lock_stealing)
return 0;

unlock = false;
}

freed = i915_gem_purge(dev_priv, nr_to_scan);
if (freed < nr_to_scan)
freed += __i915_gem_shrink(dev_priv, nr_to_scan,
false);
if (freed < nr_to_scan)
freed += i915_gem_shrink_all(dev_priv);

if (unlock)
mutex_unlock(&dev->struct_mutex);
return freed;
}
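
For context, the reclaim core consumes the two callbacks roughly as in the simplified sketch below: count_objects first for an estimate, then scan_objects in batches until enough has been freed or the shrinker returns SHRINK_STOP. This is an illustration only, not the actual shrink_slab() implementation.

/* Rough illustration of how the reclaim core drives the new callbacks;
 * not the real mm/vmscan.c code. */
static unsigned long drive_shrinker(struct shrinker *s, struct shrink_control *sc,
				    unsigned long batch)
{
	unsigned long count = s->count_objects(s, sc);
	unsigned long total_freed = 0;

	while (count) {
		unsigned long ret;

		sc->nr_to_scan = count < batch ? count : batch;
		ret = s->scan_objects(s, sc);
		if (ret == SHRINK_STOP)
			break;		/* shrinker cannot make progress right now */

		total_freed += ret;
		count -= sc->nr_to_scan;
	}

	return total_freed;
}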

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
44 changes: 28 additions & 16 deletions drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
return nr_free;
}

/* Get good estimation how many pages are free in pools */
static int ttm_pool_get_num_unused_pages(void)
{
unsigned i;
int total = 0;
for (i = 0; i < NUM_POOLS; ++i)
total += _manager->pools[i].npages;

return total;
}

/**
* Callback for mm to request pool to reduce number of pages held.
*
* XXX: (dchinner) Deadlock warning!
*
* ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
* this can deadlock when called with a sc->gfp_mask that is not equal to
* GFP_KERNEL.
*
* This code is crying out for a shrinker per pool....
*/
static int ttm_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;

pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
freed += nr_free - shrink_pages;
}
/* return estimated number of unused pages in pool */
return ttm_pool_get_num_unused_pages();
return freed;
}


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned i;
unsigned long count = 0;

for (i = 0; i < NUM_POOLS; ++i)
count += _manager->pools[i].npages;

return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
manager->mm_shrink.count_objects = ttm_pool_shrink_count;
manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
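
The XXX deadlock warning above is left unresolved by this commit. One possible mitigation, not applied here, is to check sc->gfp_mask before performing the GFP_KERNEL allocations that ttm_page_pool_free() needs, along the lines of this hypothetical wrapper:

/* Hypothetical guard, not part of this commit: bail out unless the reclaim
 * context allows the GFP_KERNEL allocations done by ttm_page_pool_free(). */
static unsigned long
ttm_pool_shrink_scan_guarded(struct shrinker *shrink, struct shrink_control *sc)
{
	if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
		return SHRINK_STOP;

	return ttm_pool_shrink_scan(shrink, sc);
}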
51 changes: 32 additions & 19 deletions drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);

/* Get good estimation how many pages are free in pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
struct device_pools *p;
unsigned total = 0;

mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools)
total += p->pool->npages_free;
mutex_unlock(&_manager->lock);
return total;
}

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
* Callback for mm to request pool to reduce number of pages held.
*
* XXX: (dchinner) Deadlock warning!
*
* ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
* needs to be paid to sc->gfp_mask to determine if this can be done or not.
* GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be really
* bad.
*
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
*/
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
unsigned long freed = 0;

if (list_empty(&_manager->pools))
return 0;
return SHRINK_STOP;

mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
freed += nr_free - shrink_pages;

pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
/* return estimated number of unused pages in pool */
return ttm_dma_pool_get_num_unused_pages();
return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct device_pools *p;
unsigned long count = 0;

mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools)
count += p->pool->npages_free;
mutex_unlock(&_manager->lock);
return count;
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
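
The teardown path is unchanged by the new callbacks: a shrinker registered this way is still removed with unregister_shrinker(). A sketch of the counterpart to the init function above (the real _fini helper is not shown in this diff):

/* Counterpart to ttm_dma_pool_mm_shrink_init(); not shown in this hunk. */
static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}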
