diff --git a/drivers/video/tegra/nvmap/nvmap_pp.c b/drivers/video/tegra/nvmap/nvmap_pp.c
index 599b76a74b6df8..af34da67826137 100644
--- a/drivers/video/tegra/nvmap/nvmap_pp.c
+++ b/drivers/video/tegra/nvmap/nvmap_pp.c
@@ -41,7 +41,6 @@ static int pool_size;
 
 static struct task_struct *background_allocator;
 static DECLARE_WAIT_QUEUE_HEAD(nvmap_bg_wait);
-static struct page *pending_pages[PENDING_PAGES_SIZE];
 
 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
 static inline void __pp_dbg_var_add(u64 *dbg_var, u32 nr)
@@ -117,28 +116,34 @@ static void nvmap_pp_do_background_zero_pages(struct nvmap_page_pool *pool)
 	struct page *page;
 	int ret;
 
+	/*
+	 * Statically declared array of pages to be zeroed in a batch,
+	 * local to this thread but too big for the stack.
+	 */
+	static struct page *pending_zero_pages[PENDING_PAGES_SIZE];
+
 	mutex_lock(&pool->lock);
 	for (i = 0; i < PENDING_PAGES_SIZE; i++) {
 		page = get_zero_list_page(pool);
 		if (page == NULL)
 			break;
-		pending_pages[i] = page;
+		pending_zero_pages[i] = page;
 	}
 	mutex_unlock(&pool->lock);
 
-	ret = nvmap_pp_zero_pages(pending_pages, i);
+	ret = nvmap_pp_zero_pages(pending_zero_pages, i);
 	if (ret < 0) {
 		ret = 0;
 		goto out;
 	}
 
 	mutex_lock(&pool->lock);
-	ret = __nvmap_page_pool_fill_lots_locked(pool, pending_pages, i);
+	ret = __nvmap_page_pool_fill_lots_locked(pool, pending_zero_pages, i);
 	mutex_unlock(&pool->lock);
 
 out:
 	for (; ret < i; ret++)
-		__free_page(pending_pages[ret]);
+		__free_page(pending_zero_pages[ret]);
 }
 
 /*
@@ -212,11 +217,9 @@ static struct page *nvmap_page_pool_alloc_locked(struct nvmap_page_pool *pool,
  * Alloc a bunch of pages from the page pool. This will alloc as many as it can
  * and return the number of pages allocated. Pages are placed into the passed
  * array in a linear fashion starting from index 0.
- *
- * You must lock the page pool before using this.
  */
-static int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
-					struct page **pages, u32 nr)
+int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
+				struct page **pages, u32 nr)
 {
 	u32 real_nr;
 	u32 ind = 0;
@@ -224,6 +227,8 @@ static int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
 	if (!enable_pp)
 		return 0;
 
+	mutex_lock(&pool->lock);
+
 	real_nr = min_t(u32, nr, pool->count);
 
 	while (real_nr--) {
@@ -237,6 +242,7 @@ static int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
 			BUG_ON(atomic_read(&page->_count) != 1);
 		}
 	}
+	mutex_unlock(&pool->lock);
 
 	pp_alloc_add(pool, ind);
 	pp_hit_add(pool, ind);
@@ -245,18 +251,6 @@ static int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
 	return ind;
 }
 
-int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
-				struct page **pages, u32 nr)
-{
-	int ret;
-
-	mutex_lock(&pool->lock);
-	ret = __nvmap_page_pool_alloc_lots_locked(pool, pages, nr);
-	mutex_unlock(&pool->lock);
-
-	return ret;
-}
-
 /*
  * Fill a bunch of pages into the page pool. This will fill as many as it can
  * and return the number of pages filled. Pages are used from the start of the
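
For context, a minimal caller sketch (hypothetical, not part of the patch) showing the effect of the locking change: after this patch, nvmap_page_pool_alloc_lots() takes pool->lock itself, so callers must invoke it without the lock held. The helper name example_alloc_batch and the GFP_KERNEL fallback path are illustrative assumptions, not code from this driver.

/*
 * Hypothetical caller of the patched API; pool->lock must NOT be held
 * here, since nvmap_page_pool_alloc_lots() now locks internally.
 */
static int example_alloc_batch(struct nvmap_page_pool *pool,
			       struct page **pages, u32 nr)
{
	u32 got;

	/* Grab as many pages as the pool can supply in one batch. */
	got = nvmap_page_pool_alloc_lots(pool, pages, nr);

	/* Satisfy any shortfall from the buddy allocator. */
	while (got < nr) {
		pages[got] = alloc_page(GFP_KERNEL);
		if (!pages[got]) {
			while (got--)
				__free_page(pages[got]);
			return -ENOMEM;
		}
		got++;
	}
	return 0;
}

Folding the lock into nvmap_page_pool_alloc_lots() removes the locked/unlocked wrapper pair and keeps the mutex scope to the pool traversal itself; the trade-off is that batch callers can no longer combine the allocation with other pool operations under a single lock acquisition.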