[PATCH 2/3] drm/ttm/dma: Fix accounting error when calling ttm_mem_global_free_page.

From: Konrad Rzeszutek Wilk
Date: Mon Dec 12 2011 - 15:10:41 EST


The code that figures out how many pages to shrink the pool by ends up
reusing 'count' to hold that value (the excess over _manager->options.max_size)
- which on its own is OK. Except that 'count' also drives the subsequent
ttm_mem_global_free_page() accounting loop, so we end up freeing the wrong
number of pages there. Fix it by using a separate variable ('npages') for
the number of pages to shrink the pool by.
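
To make the mixup concrete, below is a small standalone userspace sketch of
the pattern - not the TTM code itself; POOL_MAX, 'accounted' and unpopulate()
are invented stand-ins for _manager->options.max_size, the ttm_mem_global
accounting and ttm_dma_unpopulate():

#include <stdio.h>

#define POOL_MAX	64	/* stand-in for _manager->options.max_size */

static unsigned accounted;	/* stand-in for the ttm_mem_global accounting */

static unsigned unpopulate(unsigned npages_free, unsigned count, int fixed)
{
	unsigned npages = count;	/* pages to shrink the pool by */
	unsigned i;

	npages_free += count;
	if (npages_free > POOL_MAX) {
		if (fixed)
			npages = npages_free - POOL_MAX;
		else
			count = npages_free - POOL_MAX;	/* clobbers 'count' */
	}

	/* per-page accounting, analogous to the ttm_mem_global_free_page() loop */
	for (i = 0; i < count; i++)
		accounted--;

	return fixed ? npages : count;	/* what gets handed to the pool shrinker */
}

int main(void)
{
	unsigned shrink;

	/* 60 pages already free in the pool, 20 more being unpopulated */
	accounted = 20;
	shrink = unpopulate(60, 20, 0);
	printf("buggy: accounted=%u (want 0), shrink=%u\n", accounted, shrink);

	accounted = 20;
	shrink = unpopulate(60, 20, 1);
	printf("fixed: accounted=%u (want 0), shrink=%u\n", accounted, shrink);
	return 0;
}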

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 10 ++++++----
1 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 6c06d0b..e57aa24 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -949,7 +949,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	struct dma_page *d_page, *next;
 	enum pool_type type;
 	bool is_cached = false;
-	unsigned count = 0, i;
+	unsigned count = 0, i, npages;
 	unsigned long irq_flags;
 
 	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
@@ -971,11 +971,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	pool->npages_in_use -= count;
 	if (is_cached) {
 		pool->nfrees += count;
+		npages = count;
 	} else {
 		pool->npages_free += count;
 		list_splice(&ttm_dma->pages_list, &pool->free_list);
+		npages = count;
 		if (pool->npages_free > _manager->options.max_size) {
-			count = pool->npages_free - _manager->options.max_size;
+			npages = pool->npages_free - _manager->options.max_size;
 		}
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
@@ -1000,8 +1002,8 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	}
 
 	/* shrink pool if necessary */
-	if (count)
-		ttm_dma_page_pool_free(pool, count);
+	if (npages)
+		ttm_dma_page_pool_free(pool, npages);
 	ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
--
1.7.7.3
