Lines Matching defs:bo_va

762 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
769 * Returns the found bo_va or NULL if none is found
774 * Found bo_va or NULL.
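The lookup body between these doc lines is short. A minimal sketch, reconstructed on the assumption that bo->vm_bo is a singly linked chain of struct amdgpu_vm_bo_base through ->next (the same chain the removal loop at line 1958 walks):

        /* Sketch, not verbatim: walk the BO's per-VM base chain and
         * return the bo_va whose base belongs to the requested vm. */
        struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                               struct amdgpu_bo *bo)
        {
                struct amdgpu_vm_bo_base *base;

                for (base = bo->vm_bo; base; base = base->next) {
                        if (base->vm != vm)
                                continue;

                        /* bo_va embeds the base, so convert back */
                        return container_of(base, struct amdgpu_bo_va, base);
                }
                return NULL;
        }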
1068 static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
1071 struct amdgpu_vm *vm = bo_va->base.vm;
1072 struct amdgpu_bo *bo = bo_va->base.bo;
1093 struct amdgpu_bo_va *bo_va, *tmp;
1096 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
1097 amdgpu_vm_bo_get_memory(bo_va, stats);
1099 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
1100 amdgpu_vm_bo_get_memory(bo_va, stats);
1102 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
1103 amdgpu_vm_bo_get_memory(bo_va, stats);
1105 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
1106 amdgpu_vm_bo_get_memory(bo_va, stats);
1108 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
1109 amdgpu_vm_bo_get_memory(bo_va, stats);
1111 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
1112 amdgpu_vm_bo_get_memory(bo_va, stats);
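All six walks above use list_for_each_entry_safe(), which caches the successor in tmp before visiting the current entry, so the walk survives the current bo_va being unlinked or moved to another state list. A standalone illustration of the idiom, with a hypothetical item type:

        #include <linux/list.h>
        #include <linux/slab.h>

        struct item {
                struct list_head node;
                int val;
        };

        /* Hypothetical helper: drain every item from @head, freeing as
         * we go.  Plain list_for_each_entry() would read the next
         * pointer of an already-freed entry when advancing; the _safe
         * variant fetched it into @tmp beforehand. */
        static void drain(struct list_head *head)
        {
                struct item *it, *tmp;

                list_for_each_entry_safe(it, tmp, head, node) {
                        list_del(&it->node);
                        kfree(it);
                }
        }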
1120 * @bo_va: requested BO and VM object
1123 * Fill in the page table entries for @bo_va.
1128 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
1131 struct amdgpu_bo *bo = bo_va->base.bo;
1132 struct amdgpu_vm *vm = bo_va->base.vm;
1151 if (obj->import_attach && bo_va->is_xgmi) {
1186 last_update = &bo_va->last_pt_update;
1188 if (!clear && bo_va->base.moved) {
1190 list_splice_init(&bo_va->valids, &bo_va->invalids);
1192 } else if (bo_va->cleared != clear) {
1193 list_splice_init(&bo_va->valids, &bo_va->invalids);
1196 list_for_each_entry(mapping, &bo_va->invalids, list) {
1199 /* normally, bo_va->flags only contains READABLE and WRITEABLE bits, which go here
1229 amdgpu_vm_bo_evicted(&bo_va->base);
1231 amdgpu_vm_bo_idle(&bo_va->base);
1233 amdgpu_vm_bo_done(&bo_va->base);
1236 list_splice_init(&bo_va->invalids, &bo_va->valids);
1237 bo_va->cleared = clear;
1238 bo_va->base.moved = false;
1241 list_for_each_entry(mapping, &bo_va->valids, list)
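Condensing the matched lines, the valids/invalids bookkeeping in amdgpu_vm_bo_update() is a small state machine: anything that might have stale PTEs is spliced onto invalids, rewritten, and only then declared valid again. A sketch assembled from lines 1188-1241 above (error handling and the actual page-table writes omitted):

        if (!clear && bo_va->base.moved) {
                /* BO moved: every previously valid mapping has stale PTEs */
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        } else if (bo_va->cleared != clear) {
                /* switching between mapped and cleared PTEs: same treatment */
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        }

        list_for_each_entry(mapping, &bo_va->invalids, list) {
                /* (re)write the page table entries for this mapping */
        }

        /* all updates queued successfully: mappings are valid again */
        list_splice_init(&bo_va->invalids, &bo_va->valids);
        bo_va->cleared = clear;
        bo_va->base.moved = false;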
1445 struct amdgpu_bo_va *bo_va;
1452 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1457 r = amdgpu_vm_bo_update(adev, bo_va, false);
1464 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1466 resv = bo_va->base.bo->tbo.base.resv;
1476 r = amdgpu_vm_bo_update(adev, bo_va, clear);
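The surrounding loops in amdgpu_vm_handle_moved() differ in how they pick the clear argument: per-VM BOs on vm->moved share the root reservation and can always be updated, while independently reserved BOs on vm->invalidated are trylocked first. A condensed sketch; the locking is simplified and is an assumption on my part:

        while (!list_empty(&vm->moved)) {
                bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
                                         base.vm_status);
                /* shares the root PD reservation: safe to update PTEs */
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;
        }

        while (!list_empty(&vm->invalidated)) {
                bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
                                         base.vm_status);
                resv = bo_va->base.bo->tbo.base.resv;

                /* assumption: simplified; if the BO can't be reserved,
                 * clear its PTEs instead of updating them */
                clear = !dma_resv_trylock(resv);

                r = amdgpu_vm_bo_update(adev, bo_va, clear);
                if (r)
                        return r;

                if (!clear)
                        dma_resv_unlock(resv);
        }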
1500 * Newly added bo_va or NULL for failure
1508 struct amdgpu_bo_va *bo_va;
1510 bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
1511 if (bo_va == NULL) {
1514 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1516 bo_va->ref_count = 1;
1517 bo_va->last_pt_update = dma_fence_get_stub();
1518 INIT_LIST_HEAD(&bo_va->valids);
1519 INIT_LIST_HEAD(&bo_va->invalids);
1522 return bo_va;
1526 bo_va->is_xgmi = true;
1531 return bo_va;
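A minimal caller-side sketch; per the function's kernel-doc (not among the matches) the BO must already be reserved, and NULL signals allocation failure:

        /* Hypothetical caller: attach per-VM mapping state for @bo. */
        struct amdgpu_bo_va *bo_va;

        bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va)
                return -ENOMEM;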
1539 * @bo_va: bo_va to store the address
1545 struct amdgpu_bo_va *bo_va,
1548 struct amdgpu_vm *vm = bo_va->base.vm;
1549 struct amdgpu_bo *bo = bo_va->base.bo;
1551 mapping->bo_va = bo_va;
1552 list_add(&mapping->list, &bo_va->invalids);
1559 !bo_va->base.moved) {
1560 amdgpu_vm_bo_moved(&bo_va->base);
1562 trace_amdgpu_vm_bo_map(bo_va, mapping);
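Between the matched lines, amdgpu_vm_bo_insert_map() also inserts the mapping into the VM's address interval tree. A condensed reconstruction; the interval-tree call is an assumption from the full source:

        mapping->bo_va = bo_va;
        list_add(&mapping->list, &bo_va->invalids);
        amdgpu_vm_it_insert(mapping, &vm->va);  /* assumption: sits in the gap */

        /* per-VM BOs (sharing the root reservation) get queued for
         * update immediately; independently reserved BOs wait */
        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
            !bo_va->base.moved)
                amdgpu_vm_bo_moved(&bo_va->base);

        trace_amdgpu_vm_bo_map(bo_va, mapping);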
1600 * @bo_va: bo_va to store the address
1614 struct amdgpu_bo_va *bo_va,
1619 struct amdgpu_bo *bo = bo_va->base.bo;
1620 struct amdgpu_vm *vm = bo_va->base.vm;
1649 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
1658 * @bo_va: bo_va to store the address
1673 struct amdgpu_bo_va *bo_va,
1678 struct amdgpu_bo *bo = bo_va->base.bo;
1691 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1705 amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
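A hedged usage sketch for the two mapping entry points; the addresses, sizes, and flags are hypothetical, but the parameter order (GPU VA, BO offset, size in bytes, PTE flags) follows the full source:

        /* Hypothetical: map 1 MiB of the BO, from BO offset 0, at GPU
         * VA 0x100000, readable and writeable.  amdgpu_vm_bo_map()
         * fails on overlap with an existing mapping;
         * amdgpu_vm_bo_replace_map() instead clears any overlapping
         * range first, as line 1691 shows. */
        int r;

        r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
        if (r)
                return r;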
1714 * @bo_va: bo_va to remove the address from
1725 struct amdgpu_bo_va *bo_va,
1729 struct amdgpu_vm *vm = bo_va->base.vm;
1734 list_for_each_entry(mapping, &bo_va->valids, list) {
1739 if (&mapping->list == &bo_va->valids) {
1742 list_for_each_entry(mapping, &bo_va->invalids, list) {
1747 if (&mapping->list == &bo_va->invalids)
1753 mapping->bo_va = NULL;
1754 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1760 bo_va->last_pt_update);
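The valids/invalids distinction decides the mapping's fate after unlink: a mapping found on valids has live PTEs and is parked on vm->freed for a later clear, while one still on invalids never reached the page tables and can be freed once the last page-table update fence signals. A sketch of the tail, assuming the list-head sentinel checks at lines 1739 and 1747 picked @mapping and set @valid accordingly; the interval-tree removal is an assumption from the full source:

        list_del(&mapping->list);
        amdgpu_vm_it_remove(mapping, &vm->va);  /* assumption */
        mapping->bo_va = NULL;
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);

        if (valid)      /* PTEs are live: clear them later */
                list_add(&mapping->list, &vm->freed);
        else            /* never validated: free once the fence signals */
                amdgpu_vm_free_mapping(adev, vm, mapping,
                                       bo_va->last_pt_update);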
1816 before->bo_va = tmp->bo_va;
1817 list_add(&before->list, &tmp->bo_va->invalids);
1827 after->bo_va = tmp->bo_va;
1828 list_add(&after->list, &tmp->bo_va->invalids);
1847 tmp->bo_va = NULL;
1854 struct amdgpu_bo *bo = before->bo_va->base.bo;
1861 !before->bo_va->base.moved)
1862 amdgpu_vm_bo_moved(&before->bo_va->base);
1869 struct amdgpu_bo *bo = after->bo_va->base.bo;
1876 !after->bo_va->base.moved)
1877 amdgpu_vm_bo_moved(&after->bo_va->base);
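When amdgpu_vm_bo_clear_mappings() punches a hole out of an existing mapping, the remainders keep the original bo_va and are re-queued as invalid (lines 1817 and 1828), then the owning bo_va is marked moved if needed (lines 1854-1877). A sketch of the split bookkeeping; field names come from the matched lines, the offset arithmetic and flags copy are hedged reconstructions:

        if (tmp->start < saddr) {               /* head remainder */
                before->start  = tmp->start;
                before->last   = saddr - 1;
                before->offset = tmp->offset;
                before->bo_va  = tmp->bo_va;
                list_add(&before->list, &tmp->bo_va->invalids);
        }

        if (tmp->last > eaddr) {                /* tail remainder */
                after->start  = eaddr + 1;
                after->last   = tmp->last;
                /* assumption: offset advances by the pages skipped */
                after->offset = tmp->offset +
                                ((after->start - tmp->start) <<
                                 AMDGPU_GPU_PAGE_SHIFT);
                after->bo_va  = tmp->bo_va;
                list_add(&after->list, &tmp->bo_va->invalids);
        }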
1920 if (mapping->bo_va && mapping->bo_va->base.bo) {
1923 bo = mapping->bo_va->base.bo;
1937 * @bo_va: requested bo_va
1939 * Remove @bo_va->bo from the requested vm.
1944 struct amdgpu_bo_va *bo_va)
1947 struct amdgpu_bo *bo = bo_va->base.bo;
1948 struct amdgpu_vm *vm = bo_va->base.vm;
1958 for (base = &bo_va->base.bo->vm_bo; *base;
1960 if (*base != &bo_va->base)
1963 *base = bo_va->base.next;
1969 list_del(&bo_va->base.vm_status);
1972 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1975 mapping->bo_va = NULL;
1976 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1979 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1983 bo_va->last_pt_update);
1986 dma_fence_put(bo_va->last_pt_update);
1988 if (bo && bo_va->is_xgmi)
1991 kfree(bo_va);
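The unlink at lines 1958-1963 is the classic pointer-to-pointer idiom for removing a node from a singly linked list without special-casing the head: walk the address of each next pointer until it points at our node, then redirect it past us. Spelled out:

        /* Sketch of the unlink, matching lines 1958-1963 above. */
        struct amdgpu_vm_bo_base **base;

        for (base = &bo_va->base.bo->vm_bo; *base;
             base = &(*base)->next) {
                if (*base != &bo_va->base)
                        continue;

                *base = bo_va->base.next;
                break;
        }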
2757 struct amdgpu_bo_va *bo_va, *tmp;
2774 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2775 if (!bo_va->base.bo)
2777 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2783 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2784 if (!bo_va->base.bo)
2786 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2792 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2793 if (!bo_va->base.bo)
2795 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2801 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2802 if (!bo_va->base.bo)
2804 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2810 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2811 if (!bo_va->base.bo)
2813 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2819 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2820 if (!bo_va->base.bo)
2822 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
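The six debugfs loops above are textually identical apart from the state list and the accumulator. A hypothetical helper, not present in the driver, that folds them into one walker; @id keeps BO numbering continuous across lists:

        /* Hypothetical refactor sketch: print every BO on one state
         * list and return the accumulated size. */
        static u64 amdgpu_debugfs_vm_walk(struct list_head *head,
                                          unsigned int *id,
                                          struct seq_file *m)
        {
                struct amdgpu_bo_va *bo_va, *tmp;
                u64 total = 0;

                list_for_each_entry_safe(bo_va, tmp, head, base.vm_status) {
                        if (!bo_va->base.bo)
                                continue;
                        total += amdgpu_bo_print_info((*id)++,
                                                      bo_va->base.bo, m);
                }
                return total;
        }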