Lines matching full:order in ttm_pool.c

56  * @vaddr: original vaddr returned for the mapping, with the order in the lower bits
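
The comment at line 56 documents the packing applied at line 130 below (dma->vaddr = (unsigned long)vaddr | order): the coherent mapping is at least page-aligned, so the low bits of its virtual address are guaranteed zero and can carry the allocation order for free. A minimal userspace sketch of the same trick, assuming 4 KiB pages; all names here are illustrative, not from ttm_pool.c:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL

/* Stash the order in the low bits of a page-aligned address: page
 * alignment guarantees they are zero, and any order fits in them. */
static uintptr_t pack_vaddr(void *vaddr, unsigned int order)
{
	assert(((uintptr_t)vaddr & (DEMO_PAGE_SIZE - 1)) == 0);
	return (uintptr_t)vaddr | order;
}

static void *unpack_vaddr(uintptr_t packed, unsigned int *order)
{
	*order = packed & (DEMO_PAGE_SIZE - 1);
	return (void *)(packed & ~(DEMO_PAGE_SIZE - 1));
}

int main(void)
{
	unsigned int order = 3, recovered;
	void *p = aligned_alloc(DEMO_PAGE_SIZE, (1UL << order) * DEMO_PAGE_SIZE);
	void *q;

	if (!p)
		return 1;
	q = unpack_vaddr(pack_vaddr(p, order), &recovered);
	printf("vaddr %p, recovered order %u\n", q, recovered);
	free(q);
	return 0;
}
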
85 /* Allocate pages of size 1 << order with the given gfp_flags */
87 unsigned int order) in ttm_pool_alloc_page() argument
94 /* Don't set the __GFP_COMP flag for higher order allocations. in ttm_pool_alloc_page()
98 if (order) in ttm_pool_alloc_page()
103 p = alloc_pages_node(pool->nid, gfp_flags, order); in ttm_pool_alloc_page()
105 p->private = order; in ttm_pool_alloc_page()
114 if (order) in ttm_pool_alloc_page()
117 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
130 dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page()
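
Every size in the allocation path above is derived from the order: 1 << order pages, hence (1ULL << order) * PAGE_SIZE bytes handed to dma_alloc_attrs(). A quick standalone check of that arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ULL

int main(void)
{
	/* The same size expression the dma_alloc_attrs()/dma_free_attrs()
	 * calls use: bytes = (1ULL << order) * PAGE_SIZE. */
	for (unsigned int order = 0; order <= 10; order++)
		printf("order %2u -> %4llu pages -> %6llu KiB\n",
		       order, 1ULL << order,
		       (1ULL << order) * DEMO_PAGE_SIZE / 1024);
	return 0;
}
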
139 /* Reset the caching of pages of size 1 << order and free them */
141 unsigned int order, struct page *p) in ttm_pool_free_page() argument
152 set_pages_wb(p, 1 << order); in ttm_pool_free_page()
156 __free_pages(p, order); in ttm_pool_free_page()
160 if (order) in ttm_pool_free_page()
165 dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr, in ttm_pool_free_page()
173 gfp_t gfp_flags, unsigned int order, in ttm_pool_alloc_page() argument
192 if (bus_dmamap_create(dmat, (1ULL << order) * PAGE_SIZE, 1, in ttm_pool_alloc_page()
193 (1ULL << order) * PAGE_SIZE, 0, flags | dmaflags, &dma->map)) in ttm_pool_alloc_page()
196 if (bus_dmamem_alloc_range(dmat, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
203 if (bus_dmamem_alloc(dmat, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
210 (1ULL << order) * PAGE_SIZE, flags)) { in ttm_pool_alloc_page()
233 unsigned int order, struct vm_page *p) in ttm_pool_free_page() argument
242 set_pages_wb(p, 1 << order); in ttm_pool_free_page()
278 /* Map pages of 1 << order size and fill the DMA address array */
279 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, in ttm_pool_map() argument
290 size_t size = (1ULL << order) * PAGE_SIZE; in ttm_pool_map()
297 for (i = 1 << order; i ; --i) { in ttm_pool_map()
305 /* Unmap pages of 1 << order size */
319 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, in ttm_pool_map() argument
329 for (i = 1 << order; i ; --i) { in ttm_pool_map()
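
Both ttm_pool_map() variants walk the sub-pages of one contiguous chunk with the same countdown loop, for (i = 1 << order; i; --i), emitting one DMA address per PAGE_SIZE step. A userspace sketch of that iteration pattern (illustrative names, 4 KiB pages assumed):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL

/* Record one address per page of a contiguous 1 << order allocation,
 * advancing PAGE_SIZE at a time just like the loop in ttm_pool_map(). */
static void fill_page_addrs(void *base, unsigned int order, uintptr_t *out)
{
	uintptr_t addr = (uintptr_t)base;

	for (size_t i = 1UL << order; i; --i) {
		*out++ = addr;
		addr += DEMO_PAGE_SIZE;
	}
}

int main(void)
{
	enum { ORDER = 2 };			/* 4 pages */
	uintptr_t addrs[1 << ORDER];
	void *buf = aligned_alloc(DEMO_PAGE_SIZE, (1UL << ORDER) * DEMO_PAGE_SIZE);

	if (!buf)
		return 1;
	fill_page_addrs(buf, ORDER, addrs);
	for (size_t i = 0; i < 1UL << ORDER; i++)
		printf("page %zu -> %#lx\n", i, (unsigned long)addrs[i]);
	free(buf);
	return 0;
}
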
347 unsigned int i, num_pages = 1 << pt->order; in ttm_pool_type_give()
364 atomic_long_add(1 << pt->order, &allocated_pages); in ttm_pool_type_give()
377 atomic_long_sub(1 << pt->order, &allocated_pages); in ttm_pool_type_take()
388 enum ttm_caching caching, unsigned int order) in ttm_pool_type_init() argument
392 pt->order = order; in ttm_pool_type_init()
413 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_type_fini()
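
Lines 364 and 377 show the pool's global accounting: whenever a chunk enters or leaves a pool_type, the shared allocated_pages counter moves by 1 << order, so it is always kept in units of base pages rather than chunks (and ttm_pool_shrink() at line 471 reports freed work in the same units). A hedged sketch of that bookkeeping using C11 atomics; the kernel uses atomic_long_add()/atomic_long_sub() instead:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long allocated_pages = 0;

/* A chunk of 1 << order pages enters the pool's cache. */
static void give(unsigned int order)
{
	atomic_fetch_add(&allocated_pages, 1L << order);
}

/* A chunk of 1 << order pages is taken back out (or shrunk away). */
static void take(unsigned int order)
{
	atomic_fetch_sub(&allocated_pages, 1L << order);
}

int main(void)
{
	give(9);	/* cache a 512-page (2 MiB) chunk */
	give(0);	/* cache a single page            */
	take(9);	/* reuse the big chunk            */
	printf("pages cached: %ld\n", atomic_load(&allocated_pages));
	return 0;
}
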
422 /* Return the pool_type to use for the given caching and order */
425 unsigned int order) in ttm_pool_select_type() argument
428 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
434 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
437 return &global_dma32_write_combined[order]; in ttm_pool_select_type()
439 return &global_write_combined[order]; in ttm_pool_select_type()
442 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
445 return &global_dma32_uncached[order]; in ttm_pool_select_type()
447 return &global_uncached[order]; in ttm_pool_select_type()
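
The fall-through above is the whole routing policy: requests that can be served from a per-device pool index pool->caching[caching].orders[order], while write-combined and uncached requests without per-device backing land in the shared global_* arrays, again indexed by order. Stripped to its data structure, it is a two-dimensional table lookup; a sketch under invented demo names:

#include <stdio.h>

enum demo_caching { DEMO_CACHED, DEMO_WC, DEMO_UNCACHED, DEMO_NUM_CACHING };
#define DEMO_MAX_ORDER 10

struct demo_pool_type {
	enum demo_caching caching;
	unsigned int order;
};

/* One pool per (caching mode, order) pair, as in struct ttm_pool's
 * caching[].orders[] and the global_* arrays in ttm_pool.c. */
static struct demo_pool_type pools[DEMO_NUM_CACHING][DEMO_MAX_ORDER + 1];

static struct demo_pool_type *select_type(enum demo_caching caching,
					  unsigned int order)
{
	return &pools[caching][order];
}

int main(void)
{
	struct demo_pool_type *pt = select_type(DEMO_WC, 4);

	pt->caching = DEMO_WC;
	pt->order = 4;
	printf("pool for caching %d, order %u\n", pt->caching, pt->order);
	return 0;
}
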
470 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_shrink()
471 num_pages = 1 << pt->order; in ttm_pool_shrink()
481 /* Return the allocation order for a given page */
496 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, in ttm_pool_page_allocated() argument
506 r = ttm_pool_map(pool, order, p, dma_addr); in ttm_pool_page_allocated()
511 *num_pages -= 1 << order; in ttm_pool_page_allocated()
512 for (i = 1 << order; i; --i, ++(*pages), ++p, ++(*orders)) { in ttm_pool_page_allocated()
514 **orders = order; in ttm_pool_page_allocated()
538 unsigned int order; in ttm_pool_free_range() local
544 order = tt->orders[i]; in ttm_pool_free_range()
545 nr = (1UL << order); in ttm_pool_free_range()
549 pt = ttm_pool_select_type(pool, caching, order); in ttm_pool_free_range()
553 ttm_pool_free_page(pool, caching, order, *pages); in ttm_pool_free_range()
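
Lines 512-514 and 544-545 are two halves of one scheme: when a chunk is handed out, ttm_pool_page_allocated() writes the chunk's order into tt->orders[] for every one of its 1 << order pages; when the buffer is torn down, ttm_pool_free_range() reads the order at the start of each chunk and advances nr = 1 << order pages at a time. A round-trip sketch of that encoding (demo names, not the real ttm_tt layout):

#include <stdio.h>

#define DEMO_TOTAL 8

/* Record a chunk of 1 << order pages starting at index i, like
 * ttm_pool_page_allocated() filling tt->orders[]. */
static void record_chunk(unsigned int *orders, unsigned int i,
			 unsigned int order)
{
	for (unsigned int n = 1U << order; n; --n)
		orders[i++] = order;
}

int main(void)
{
	unsigned int orders[DEMO_TOTAL];

	/* One order-2 chunk (4 pages), one order-1 (2), two order-0. */
	record_chunk(orders, 0, 2);
	record_chunk(orders, 4, 1);
	record_chunk(orders, 6, 0);
	record_chunk(orders, 7, 0);

	/* Walk it back the way ttm_pool_free_range() does. */
	for (unsigned int i = 0; i < DEMO_TOTAL; ) {
		unsigned int nr = 1U << orders[i];

		printf("free chunk at page %u: order %u (%u pages)\n",
		       i, orders[i], nr);
		i += nr;
	}
	return 0;
}
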
579 unsigned int order; in ttm_pool_alloc() local
600 for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages)); in ttm_pool_alloc()
602 order = min_t(unsigned int, order, __fls(num_pages))) { in ttm_pool_alloc()
606 pt = ttm_pool_select_type(pool, tt->caching, order); in ttm_pool_alloc()
616 r = ttm_pool_page_allocated(pool, order, p, in ttm_pool_alloc()
624 if (num_pages < (1 << order)) in ttm_pool_alloc()
632 while (num_pages >= (1 << order) && in ttm_pool_alloc()
633 (p = ttm_pool_alloc_page(pool, gfp_flags, order, tt->dmat))) { in ttm_pool_alloc()
642 r = ttm_pool_page_allocated(pool, order, p, &dma_addr, in ttm_pool_alloc()
651 if (order) { in ttm_pool_alloc()
652 --order; in ttm_pool_alloc()
667 ttm_pool_free_page(pool, page_caching, order, p); in ttm_pool_alloc()
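
The loop bounds at lines 600-602 give ttm_pool_alloc() its greedy shape: start at the largest useful order, min(MAX_ORDER, __fls(num_pages)), never let a chunk overshoot the pages still needed, and step down to a smaller order when a large allocation fails (line 652). With every allocation assumed to succeed, the carving reduces to the sketch below, where fls_demo() stands in for the kernel's __fls():

#include <stdio.h>

#define DEMO_MAX_ORDER 10

/* Index of the highest set bit, like the kernel's __fls() for v != 0. */
static unsigned int fls_demo(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long num_pages = 1000;
	unsigned int order;

	for (order = min_u(DEMO_MAX_ORDER, fls_demo(num_pages));
	     num_pages;
	     order = min_u(order, fls_demo(num_pages))) {
		printf("take order %u (%4lu pages), %4lu still needed\n",
		       order, 1UL << order, num_pages);
		num_pages -= 1UL << order;
	}
	return 0;
}

For num_pages = 1000 this carves chunks of 512, 256, 128, 64, 32 and 8 pages, which is exactly why the per-page orders recorded earlier can vary within a single buffer.
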
810 /* Print a nice header for the order */