
Lines Matching refs:chunk

94   struct nouveau_dmem_chunk *chunk = page->zone_device_data;
95 unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
97 return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
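
The matches at 94-97 are the address helper: it recovers the owning chunk from page->zone_device_data, computes the page's index inside the chunk's contiguous pfn range, and converts that into a byte offset into the chunk's pinned VRAM buffer object. A minimal sketch of the surrounding helper; the function name and static qualifier follow the mainline nouveau_dmem.c of this era and are not part of the matched lines:

static unsigned long
nouveau_dmem_page_addr(struct page *page)
{
        /* zone_device_data points back at the owning chunk (set at init). */
        struct nouveau_dmem_chunk *chunk = page->zone_device_data;
        /* Index of this page within the chunk's contiguous pfn range. */
        unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

        /* Scale the index to bytes and add the buffer object's VRAM offset. */
        return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
}

Since bo.offset is only stable while the buffer stays resident, the chunk's buffer object must remain pinned in VRAM, which is what the suspend/resume matches further down maintain.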
102 struct nouveau_dmem_chunk *chunk = page->zone_device_data;
103 unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
112 spin_lock(&chunk->lock);
113 clear_bit(idx, chunk->bitmap);
114 WARN_ON(!chunk->callocated);
115 chunk->callocated--;
117 	 * FIXME when chunk->callocated reaches 0 we should add the chunk to
120 spin_unlock(&chunk->lock);
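
Lines 102-120 are the release path, run when the last reference to a device-private page is dropped: under chunk->lock, the page's slot is cleared in the allocation bitmap and the chunk's count of live allocations (callocated) is decremented. A sketch of the callback, assuming it is wired up as the page_free hook of the driver's dev_pagemap_ops as in mainline:

static void
nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = page->zone_device_data;
        unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

        spin_lock(&chunk->lock);
        /* Mark the slot free again in the chunk's allocation bitmap. */
        clear_bit(idx, chunk->bitmap);
        /* callocated counts live pages in this chunk and must not underflow. */
        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&chunk->lock);
}

The FIXME is worth taking seriously: a chunk whose callocated count reaches zero keeps its pinned VRAM buffer, so this scheme never gives memory back under pressure.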
217 struct nouveau_dmem_chunk *chunk;
224 chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
227 if (chunk == NULL) {
232 list_del(&chunk->list);
237 &chunk->bo);
241 ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
243 nouveau_bo_ref(NULL, &chunk->bo);
247 bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
248 spin_lock_init(&chunk->lock);
252 if (chunk->bo)
253 list_add(&chunk->list, &drm->dmem->chunk_empty);
255 list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
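
Lines 217-255 give an empty chunk its VRAM backing: the first entry is popped off chunk_empty under drm->dmem->mutex, a buffer object is created and pinned for it outside the mutex, the allocation bitmap is reset, and the chunk is requeued. Re-adding at the head on success but at the tail on failure keeps a chunk that has no buffer from being retried before usable ones. A sketch of the whole function; the nouveau_bo_new() argument list and the error handling are taken from the mainline tree of this era and should be read as assumptions:

static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        mutex_lock(&drm->dmem->mutex);
        chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk == NULL) {
                mutex_unlock(&drm->dmem->mutex);
                return -ENOMEM;
        }

        /* Work on the chunk without holding the list mutex. */
        list_del(&chunk->list);
        mutex_unlock(&drm->dmem->mutex);

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out;

        /* Device pages map straight onto this buffer, so keep it pinned. */
        ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
        if (ret) {
                nouveau_bo_ref(NULL, &chunk->bo);
                goto out;
        }

        bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
        spin_lock_init(&chunk->lock);
out:
        mutex_lock(&drm->dmem->mutex);
        if (chunk->bo)
                /* Usable chunk: first candidate for the next allocation. */
                list_add(&chunk->list, &drm->dmem->chunk_empty);
        else
                /* No backing storage: park it at the tail for a later retry. */
                list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
        mutex_unlock(&drm->dmem->mutex);

        return ret;
}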
264 struct nouveau_dmem_chunk *chunk;
266 chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
269 if (chunk)
270 return chunk;
272 chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
275 if (chunk->bo)
276 return chunk;
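
Lines 264-276 pick the allocation source, with drm->dmem->mutex already held by the caller: a partially used chunk on chunk_free is preferred, otherwise the first entry of chunk_empty is returned if it already has backing storage. A sketch, with the function name assumed from mainline:

static struct nouveau_dmem_chunk *
nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        /* Prefer a chunk that is already backed and partially used. */
        chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk)
                return chunk;

        /*
         * Fall back to an empty chunk, but only if it already owns a
         * buffer object; otherwise the caller must allocate one first.
         */
        chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
                                         struct nouveau_dmem_chunk,
                                         list);
        if (chunk->bo)
                return chunk;

        return NULL;
}

Note that the chunk->bo test at 275 dereferences the result of list_first_entry_or_null() without a NULL check; this only works because chunk_empty is populated at init and chunks move between lists rather than being freed, so the list is never empty while the device is up.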
286 struct nouveau_dmem_chunk *chunk;
296 chunk = nouveau_dmem_chunk_first_free_locked(drm);
297 if (chunk == NULL) {
309 spin_lock(&chunk->lock);
310 i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
312 pages[c] = chunk->pfn_first + i;
313 set_bit(i, chunk->bitmap);
314 chunk->callocated++;
317 i = find_next_zero_bit(chunk->bitmap,
320 spin_unlock(&chunk->lock);
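
Lines 286-320 are the allocator proper: the chosen chunk's bitmap is scanned with find_first_zero_bit()/find_next_zero_bit(), each claimed slot is recorded as a pfn in the output array, and when no chunk has room the code drops the mutex and backs a fresh chunk via nouveau_dmem_chunk_alloc(). The sketch below reconstructs the surrounding loop; the memset() sentinel fill and the partial-success error path are assumptions based on the mainline code, not part of the matched lines:

static int
nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
                         unsigned long npages,
                         unsigned long *pages)
{
        struct nouveau_dmem_chunk *chunk;
        unsigned long c;
        int ret;

        /* Pre-fill the output array with an invalid pfn pattern. */
        memset(pages, 0xff, npages * sizeof(*pages));

        mutex_lock(&drm->dmem->mutex);
        for (c = 0; c < npages;) {
                unsigned long i;

                chunk = nouveau_dmem_chunk_first_free_locked(drm);
                if (chunk == NULL) {
                        mutex_unlock(&drm->dmem->mutex);
                        /* No free slot anywhere: back another chunk with VRAM. */
                        ret = nouveau_dmem_chunk_alloc(drm);
                        if (ret) {
                                /* Settle for a partial allocation if at
                                 * least one page was handed out. */
                                if (c)
                                        return 0;
                                return ret;
                        }
                        mutex_lock(&drm->dmem->mutex);
                        continue;
                }

                spin_lock(&chunk->lock);
                /* Hand out free slots until the chunk or the request runs out. */
                i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
                while (i < DMEM_CHUNK_NPAGES && c < npages) {
                        pages[c] = chunk->pfn_first + i;
                        set_bit(i, chunk->bitmap);
                        chunk->callocated++;
                        c++;

                        i = find_next_zero_bit(chunk->bitmap,
                                               DMEM_CHUNK_NPAGES, i);
                }
                spin_unlock(&chunk->lock);
        }
        mutex_unlock(&drm->dmem->mutex);

        return 0;
}

Succeeding with fewer than npages pages (as sketched) lets a caller migrate a shorter range instead of failing the whole request once VRAM is exhausted; the 0xff fill marks the entries that were never populated.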
355 struct nouveau_dmem_chunk *chunk;
362 list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
363 ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
367 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
368 ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
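
Lines 355-368 are the resume path: every chunk that owns VRAM, whether partially used (chunk_free) or fully used (chunk_full), gets its buffer object pinned again, since the pins were dropped for suspend. A sketch, assuming the mainline shape of the function:

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

Only chunk_free and chunk_full are walked; chunk_empty is left alone on both the suspend and resume paths.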
378 struct nouveau_dmem_chunk *chunk;
384 list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
385 nouveau_bo_unpin(chunk->bo);
387 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
388 nouveau_bo_unpin(chunk->bo);
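
Lines 378-388 are the mirror image for suspend: the same two lists are walked and each chunk's buffer is unpinned so TTM is free to evict it. A sketch under the same assumptions:

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        /* Drop the pins taken at allocation/resume time. */
        list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
                nouveau_bo_unpin(chunk->bo);
        }
        list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
                nouveau_bo_unpin(chunk->bo);
        }
        mutex_unlock(&drm->dmem->mutex);
}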
396 struct nouveau_dmem_chunk *chunk, *tmp;
406 list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
407 if (chunk->bo) {
408 nouveau_bo_unpin(chunk->bo);
409 nouveau_bo_ref(NULL, &chunk->bo);
411 list_del(&chunk->list);
413 kfree(chunk);
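
Lines 396-413 tear everything down. By this point chunk_free and chunk_full should already be empty, so only chunk_empty is walked: each remaining chunk has its buffer unpinned and its reference dropped before the chunk itself is unlinked and freed. Linux spinlocks need no explicit destruction, so kfree() directly after list_del() is sufficient. A sketch, with the guard and the WARN_ON() sanity checks assumed from mainline:

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        /* All pages must have been freed back to their chunks by now. */
        WARN_ON(!list_empty(&drm->dmem->chunk_free));
        WARN_ON(!list_empty(&drm->dmem->chunk_full));

        list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
                if (chunk->bo) {
                        nouveau_bo_unpin(chunk->bo);
                        nouveau_bo_ref(NULL, &chunk->bo);
                }
                list_del(&chunk->list);
                /* chunk->lock is a plain spinlock; no teardown needed. */
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}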
540 struct nouveau_dmem_chunk *chunk;
544 chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
545 if (chunk == NULL) {
550 chunk->drm = drm;
551 chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
552 list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
554 page = pfn_to_page(chunk->pfn_first);
556 page->zone_device_data = chunk;
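
Lines 540-556 run at initialization: the device-private pfn range is carved into chunks of DMEM_CHUNK_NPAGES pages, each chunk is queued on chunk_empty with no backing storage yet, and every struct page in its range gets zone_device_data pointed back at the chunk, which is exactly the link the helpers at 94 and 102 rely on. A sketch of the loop body inside the init function; pfn_first (first pfn of the registered range), size (its byte length), and the error path are assumptions carried over from the surrounding mainline code:

        for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
                struct nouveau_dmem_chunk *chunk;
                struct page *page;
                unsigned long j;

                chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
                if (chunk == NULL) {
                        /* Unwind everything created so far. */
                        nouveau_dmem_fini(drm);
                        return;
                }

                chunk->drm = drm;
                chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
                list_add_tail(&chunk->list, &drm->dmem->chunk_empty);

                /* Let each device page find its chunk in O(1) later on. */
                page = pfn_to_page(chunk->pfn_first);
                for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
                        page->zone_device_data = chunk;
        }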