| /openbsd-src/sys/dev/pci/drm/include/linux/ |
| H A D | rbtree.h |
    34  struct rb_node {  struct
    35  RB_ENTRY(rb_node) __entry;
    45  struct rb_node *rb_node;  member
    55  int panic_cmp(struct rb_node *one, struct rb_node *two);
    57  RB_HEAD(linux_root, rb_node);
    58  RB_PROTOTYPE(linux_root, rb_node, __entry, panic_cmp);
    70  #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
    91  static inline struct rb_node *
    92  __rb_deepest_left(struct rb_node *node)  in __rb_deepest_left()
    94  struct rb_node *parent = NULL;  in __rb_deepest_left()
    [all …]
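This header is OpenBSD's Linux-compat shim: it wraps the Linux `struct rb_node`/`struct rb_root` API around the BSD `<sys/tree.h>` red-black macros, with `__rb_deepest_left()` supporting postorder traversal. A minimal sketch of the mapping, reconstructed from the matches above; the `rb_left`/`rb_right` accessor macros are an assumption, not quoted from the header:

    #include <sys/tree.h>

    struct rb_node {
        RB_ENTRY(rb_node) __entry;   /* BSD linkage hidden behind the Linux type */
    };
    #define rb_left  __entry.rbe_left    /* assumed accessor mapping */
    #define rb_right __entry.rbe_right

    struct rb_root {
        struct rb_node *rb_node;     /* Linux callers read root->rb_node directly */
    };

    /* Comparator only exists to instantiate the BSD macros;
     * see panic_cmp() in drm_linux.c below. */
    int panic_cmp(struct rb_node *one, struct rb_node *two);

    RB_HEAD(linux_root, rb_node);
    RB_PROTOTYPE(linux_root, rb_node, __entry, panic_cmp);

    #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)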
|
| H A D | interval_tree.h | 9 struct rb_node rb;
|
| /openbsd-src/sys/dev/pci/drm/ |
| H A D | drm_vma_manager.c |
    145  struct rb_node *iter;  in drm_vma_offset_lookup_locked()
    148  iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;  in drm_vma_offset_lookup_locked()
    246  struct rb_node **iter;  in vma_node_allow()
    247  struct rb_node *parent = NULL;  in vma_node_allow()
    259  iter = &node->vm_files.rb_node;  in vma_node_allow()
    361  struct rb_node *iter;  in drm_vma_node_revoke()
    365  iter = node->vm_files.rb_node;  in drm_vma_node_revoke()
    402  struct rb_node *iter;  in drm_vma_node_is_allowed()
    406  iter = node->vm_files.rb_node;  in drm_vma_node_is_allowed()
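The `vm_files` matches show the standard open-coded descent: each offset node keeps an rbtree of `drm_file` pointers allowed to mmap it. A sketch of the lookup shape used by `drm_vma_node_is_allowed()`; the entry type and field names here are illustrative, not the driver's own:

    #include <linux/rbtree.h>

    struct drm_file;                    /* opaque here */

    struct vma_file_entry {             /* hypothetical entry type */
        struct rb_node rb;              /* linked into node->vm_files */
        struct drm_file *tag;           /* key: a file allowed to map the node */
    };

    static bool
    vma_files_contains(struct rb_root *vm_files, struct drm_file *tag)
    {
        struct rb_node *iter = vm_files->rb_node;

        while (iter) {
            struct vma_file_entry *e =
                rb_entry(iter, struct vma_file_entry, rb);

            if (tag == e->tag)
                return true;
            /* tree is keyed by raw pointer value */
            iter = (tag < e->tag) ? iter->rb_left : iter->rb_right;
        }
        return false;
    }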
|
| H A D | drm_mm.c |
    165  struct rb_node *rb;  in INTERVAL_TREE_DEFINE()
    195  struct rb_node **link, *rb;  in drm_mm_interval_tree_add_node()
    217  link = &mm->interval_tree.rb_root.rb_node;  in drm_mm_interval_tree_add_node()
    244  struct rb_node **link = &root.rb_node, *rb = NULL; \
    260  static u64 rb_to_hole_size(struct rb_node *rb)  in rb_to_hole_size()
    268  struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;  in insert_hole_size()
    312  static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)  in rb_hole_size_to_node()
    317  static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *r
    [all …]
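Lines 195-217 are the canonical Linux insertion idiom: descend with a `struct rb_node **link` slot pointer while remembering the parent, then splice with `rb_link_node()` and rebalance. A minimal sketch, assuming a node keyed by a `start` offset; the real function orders by interval start and also maintains augmented subtree data:

    #include <linux/rbtree.h>

    struct my_node {                    /* illustrative node type */
        struct rb_node rb;
        u64 start;
    };

    static void
    my_tree_insert(struct rb_root *root, struct my_node *node)
    {
        struct rb_node **link = &root->rb_node, *parent = NULL;

        while (*link) {
            struct my_node *cur;

            parent = *link;
            cur = rb_entry(parent, struct my_node, rb);
            if (node->start < cur->start)
                link = &parent->rb_left;
            else
                link = &parent->rb_right;
        }

        rb_link_node(&node->rb, parent, link);  /* splice into the found slot */
        rb_insert_color(&node->rb, root);       /* restore red-black invariants */
    }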
| H A D | drm_prime.c |
    92  struct rb_node dmabuf_rb;
    93  struct rb_node handle_rb;
    100  struct rb_node **p, *rb;  in drm_prime_add_buf_handle()
    111  p = &prime_fpriv->dmabufs.rb_node;  in drm_prime_add_buf_handle()
    126  p = &prime_fpriv->handles.rb_node;  in drm_prime_add_buf_handle()
    146  struct rb_node *rb;  in drm_prime_lookup_buf_by_handle()
    148  rb = prime_fpriv->handles.rb_node;  in drm_prime_lookup_buf_by_handle()
    168  struct rb_node *rb;  in drm_prime_lookup_buf_handle()
    170  rb = prime_fpriv->dmabufs.rb_node;  in drm_prime_lookup_buf_handle()
    191  struct rb_node *rb;  in drm_prime_remove_buf_handle()
    [all …]
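Each imported buffer is indexed twice: `dmabuf_rb` links it into a tree keyed by the `dma_buf` pointer and `handle_rb` into one keyed by the GEM handle, so lookups in either direction stay O(log n). A sketch of the handle-side lookup, following the shape of the matches above (member names taken from the snippet, body simplified):

    #include <linux/rbtree.h>

    struct drm_prime_member {
        struct dma_buf *dma_buf;
        uint32_t handle;
        struct rb_node dmabuf_rb;   /* in prime_fpriv->dmabufs, keyed by dma_buf */
        struct rb_node handle_rb;   /* in prime_fpriv->handles, keyed by handle */
    };

    static struct dma_buf *
    lookup_buf_by_handle(struct rb_root *handles, uint32_t handle)
    {
        struct rb_node *rb = handles->rb_node;

        while (rb) {
            struct drm_prime_member *m =
                rb_entry(rb, struct drm_prime_member, handle_rb);

            if (m->handle == handle)
                return m->dma_buf;
            rb = (handle < m->handle) ? rb->rb_left : rb->rb_right;
        }
        return NULL;
    }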
|
| H A D | drm_linux.c |
    756  panic_cmp(struct rb_node *a, struct rb_node *b)  in panic_cmp()
    764  RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
    3023  struct rb_node *rb;  in interval_tree_iter_first()
    3044  struct rb_node **iter = &root->rb_root.rb_node;  in interval_tree_insert()
    3045  struct rb_node *parent = NULL;  in interval_tree_insert()
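`RB_GENERATE` needs a comparator, but the Linux-style callers above never invoke the generated FIND/INSERT paths; they open-code their own descents. So the shim can plausibly get away with a comparator that panics if it is ever reached, something like:

    int
    panic_cmp(struct rb_node *a, struct rb_node *b)
    {
        /* never reached: all lookups and inserts are open-coded */
        panic("%s: rbtree comparator should never run", __func__);
    }

    RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);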
|
| /openbsd-src/sys/dev/pci/drm/i915/gt/ |
| H A D | intel_engine_user.c |
    19  struct rb_node *p = i915->uabi_engines.rb_node;  in intel_engine_lookup_user()
    60  container_of((struct rb_node *)A, typeof(*a), uabi_node);  in engine_cmp()
    62  container_of((struct rb_node *)B, typeof(*b), uabi_node);  in engine_cmp()
    89  container_of((struct rb_node *)pos, typeof(*engine),  in sort_engines()
    257  struct rb_node **p, *prev;  in intel_engines_driver_register()
    263  p = &i915->uabi_engines.rb_node;  in intel_engines_driver_register()
    266  container_of((struct rb_node *)it, typeof(*engine),  in intel_engines_driver_register()
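The uabi engine tree is keyed by a two-field (class, instance) tuple compared lexicographically during the descent. A sketch close to the shape of `intel_engine_lookup_user()`; the `uabi_class`/`uabi_instance` field names come from i915, but the body is simplified:

    static struct intel_engine_cs *
    lookup_user_engine(struct drm_i915_private *i915, u8 class, u8 instance)
    {
        struct rb_node *p = i915->uabi_engines.rb_node;

        while (p) {
            struct intel_engine_cs *it =
                rb_entry(p, struct intel_engine_cs, uabi_node);

            if (class < it->uabi_class)             /* major key first */
                p = p->rb_left;
            else if (class > it->uabi_class)
                p = p->rb_right;
            else if (instance < it->uabi_instance)  /* then minor key */
                p = p->rb_left;
            else if (instance > it->uabi_instance)
                p = p->rb_right;
            else
                return it;
        }
        return NULL;
    }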
|
| H A D | intel_execlists_submission.c |
    192  struct rb_node rb;
    251  static struct i915_priolist *to_priolist(struct rb_node *rb)  in to_priolist()
    281  struct rb_node *rb;  in queue_prio()
    292  struct rb_node *rb = rb_first_cached(&el->virtual);  in virtual_prio()
    1031  struct rb_node *rb = rb_first_cached(&el->virtual);  in first_virtual_engine()
    1284  struct rb_node *rb;  in execlists_dequeue()
    3157  struct rb_node *rb;  in execlists_reset_cancel()
    3642  * the rb_node into a sibling.  in rcu_virtual_context_destroy()
    3649  struct rb_node *node = &ve->nodes[sibling->id].rb;  in rcu_virtual_context_destroy()
    3840  struct rb_node **paren  in virtual_submission_tasklet()
    [all …]
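`to_priolist()` is just a `container_of()` cast, and the repeated `rb_first_cached()` calls are why these run queues use `rb_root_cached`: the leftmost (highest-priority) node is kept cached, so peeking is O(1). A reduced sketch of the peek; the real `queue_prio()` folds in extra sub-priority state:

    static struct i915_priolist *
    to_priolist(struct rb_node *rb)
    {
        return rb_entry(rb, struct i915_priolist, node);
    }

    static int
    queue_prio(const struct i915_sched_engine *se)
    {
        /* O(1): rb_root_cached tracks the leftmost node */
        struct rb_node *rb = rb_first_cached(&se->queue);

        return rb ? to_priolist(rb)->priority : INT_MIN;
    }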
| H A D | intel_engine_types.h | 410 struct rb_node uabi_node;
|
| /openbsd-src/sys/dev/pci/drm/i915/ |
| H A D | i915_vma_resource.c |
    49  struct rb_node *rb;  in vma_res_itree_iter_first()
    63  struct rb_node *rb = &node->rb;  in vma_res_itree_iter_next()
    84  struct rb_node **iter = &root->rb_root.rb_node;  in vma_res_itree_insert()
    85  struct rb_node *parent = NULL;  in vma_res_itree_insert()
|
| H A D | i915_active.c |
    27  struct rb_node node;
    143  ref->cache = fetch_node(ref->tree.rb_node);  in __active_retire()
    152  rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);  in __active_retire()
    154  GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);  in __active_retire()
    276  it = fetch_node(ref->tree.rb_node);  in __active_lookup()
    296  struct rb_node **p, *parent;  in active_instance()
    306  p = &ref->tree.rb_node;  in active_instance()
    792  struct rb_node *prev, *p;  in reuse_idle_barrier()
    812  p = ref->tree.rb_node;  in reuse_idle_barrier()
    828  * No quick match, but we did find the leftmost rb_node fo  in reuse_idle_barrier()
    [all …]
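Lines 143-154 show an unusual trick: after retiring, `__active_retire()` keeps one surviving node as a cache and rebuilds the tree around it; passing a NULL parent and the root slot to `rb_link_node()` makes the cached node the new root. A hedged sketch of that step only, with the surrounding teardown elided:

    /* sketch: keep the cached node as the sole element of the tree */
    ref->cache = fetch_node(ref->tree.rb_node);     /* pick a survivor */
    ref->tree = RB_ROOT;                            /* detach the old tree */
    if (ref->cache) {
        /* NULL parent + root slot == "become the root" */
        rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
        rb_insert_color(&ref->cache->node, &ref->tree);
    }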
| H A D | i915_scheduler.c |
    34  static inline struct i915_priolist *to_priolist(struct rb_node *rb)  in to_priolist()
    41  struct rb_node *rb;  in assert_priolists()
    63  struct rb_node **parent, *rb;  in i915_sched_lookup_priolist()
    75  parent = &sched_engine->queue.rb_root.rb_node;  in i915_sched_lookup_priolist()
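`i915_sched_lookup_priolist()` is a find-or-create: descend the cached tree ordered by descending priority, and if no bucket exists, insert one, telling `rb_insert_color_cached()` whether the new node became the leftmost. A simplified sketch under those assumptions, with allocation failure handling elided:

    static struct i915_priolist *
    lookup_priolist(struct rb_root_cached *queue, int prio)
    {
        struct rb_node **parent = &queue->rb_root.rb_node, *rb = NULL;
        struct i915_priolist *p;
        bool first = true;

        while (*parent) {
            rb = *parent;
            p = rb_entry(rb, struct i915_priolist, node);
            if (prio > p->priority) {
                parent = &rb->rb_left;
            } else if (prio < p->priority) {
                parent = &rb->rb_right;
                first = false;          /* went right: not leftmost */
            } else {
                return p;               /* bucket already exists */
            }
        }

        p = kmalloc(sizeof(*p), GFP_KERNEL);    /* error handling elided */
        p->priority = prio;
        INIT_LIST_HEAD(&p->requests);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, queue, first);
        return p;
    }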
|
| H A D | i915_priolist_types.h | 43 struct rb_node node;
|
| H A D | i915_vma_types.h | 295 struct rb_node obj_node;
|
| H A D | i915_vma_resource.h | 107 struct rb_node rb;
|
| /openbsd-src/sys/dev/pci/drm/include/drm/ |
| H A D | drm_mm.h | 168 struct rb_node rb; 169 struct rb_node rb_hole_size; 170 struct rb_node rb_hole_addr;
|
| H A D | drm_vma_manager.h | 47 struct rb_node vm_rb;
|
| H A D | gpu_scheduler.h | 243 struct rb_node rb_tree_node;
|
| /openbsd-src/sys/dev/pci/drm/i915/gvt/ |
| H A D | kvmgt.c |
    98  struct rb_node gfn_node;
    99  struct rb_node dma_addr_node;
    212  struct rb_node *node = vgpu->dma_addr_cache.rb_node;  in __gvt_cache_find_dma_addr()
    230  struct rb_node *node = vgpu->gfn_cache.rb_node;  in __gvt_cache_find_gfn()
    250  struct rb_node **link, *parent = NULL;  in __gvt_cache_add()
    263  link = &vgpu->gfn_cache.rb_node;  in __gvt_cache_add()
    278  link = &vgpu->dma_addr_cache.rb_node;  in __gvt_cache_add()
    307  struct rb_node *node = NULL;  in gvt_cache_destroy()
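Each vGPU indexes the same DMA mappings both ways: `gfn_cache` is keyed by guest frame number and `dma_addr_cache` by the mapped bus address, with one `struct gvt_dma` linked into both trees. The gfn-side lookup is the usual three-way descent; a sketch matching the snippet (struct body abbreviated):

    struct gvt_dma {
        gfn_t gfn;
        dma_addr_t dma_addr;
        struct rb_node gfn_node;        /* in vgpu->gfn_cache */
        struct rb_node dma_addr_node;   /* in vgpu->dma_addr_cache */
    };

    static struct gvt_dma *
    gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
    {
        struct rb_node *node = vgpu->gfn_cache.rb_node;

        while (node) {
            struct gvt_dma *itr =
                rb_entry(node, struct gvt_dma, gfn_node);

            if (gfn < itr->gfn)
                node = node->rb_left;
            else if (gfn > itr->gfn)
                node = node->rb_right;
            else
                return itr;
        }
        return NULL;
    }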
|
| /openbsd-src/sys/dev/pci/drm/scheduler/ |
| H A D | sched_main.c |
    81  static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,  in drm_sched_entity_compare_before()
    82  const struct rb_node *b)  in drm_sched_entity_compare_before()
    243  struct rb_node *rb;  in drm_sched_rq_select_entity_fifo()
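`drm_sched_entity_compare_before()` has the `less` callback shape used with the newer `rb_add_cached()` helper, which replaces the open-coded link/parent descent: the scheduler keeps run-queue entities ordered FIFO by the submit time of their oldest pending job. A sketch under that assumption:

    static __always_inline bool
    drm_sched_entity_compare_before(struct rb_node *a, const struct rb_node *b)
    {
        struct drm_sched_entity *ea =
            rb_entry(a, struct drm_sched_entity, rb_tree_node);
        struct drm_sched_entity *eb =
            rb_entry(b, struct drm_sched_entity, rb_tree_node);

        /* earlier oldest-job timestamp sorts first (FIFO) */
        return ktime_before(ea->oldest_job_waiting, eb->oldest_job_waiting);
    }

    /* insertion then reduces to one call, e.g.:
     * rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
     *               drm_sched_entity_compare_before);
     */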
|
| /openbsd-src/sys/dev/pci/drm/i915/gem/ |
| H A D | i915_gem_object_types.h | 233 struct rb_node offset;
|
| H A D | i915_gem_mman.c |
    1043  struct rb_node *rb;  in mmap_offset_attach()
    1046  rb = obj->mmo.offsets.rb_node;  in mmap_offset_attach()
    1069  struct rb_node *rb, **p;
    1073  p = &obj->mmo.offsets.rb_node;  in __assign_mmap_offset()
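An object can expose several mmap flavours at once, so `obj->mmo.offsets` indexes its `i915_mmap_offset` records by mmap type; the `struct rb_node offset` member from i915_gem_object_types.h above is the linkage. A sketch of the lookup half of `mmap_offset_attach()`, simplified from the real code:

    static struct i915_mmap_offset *
    lookup_mmo(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
    {
        struct rb_node *rb = obj->mmo.offsets.rb_node;

        while (rb) {
            struct i915_mmap_offset *mmo =
                rb_entry(rb, struct i915_mmap_offset, offset);

            if (mmo->mmap_type == type)
                return mmo;
            rb = (type < mmo->mmap_type) ? rb->rb_left : rb->rb_right;
        }
        return NULL;
    }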
|
| /openbsd-src/sys/dev/pci/drm/amd/amdgpu/ |
| H A D | amdgpu_object.h | 67 struct rb_node rb;
|
| H A D | amdgpu_vm.c |
    101  struct rb_node *rb;
    115  struct rb_node *rb = &node->rb;
    136  struct rb_node **iter = &root->rb_root.rb_node;
    137  struct rb_node *parent = NULL;
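These matches sketch an open-coded interval tree, like the one in i915_vma_resource.c above: `iter_first` finds the leftmost node overlapping [start, last], `iter_next` walks in-order with `rb_next()` until a node starts past the window, and insert uses the usual link/parent descent. An illustrative `iter_next`; the node layout is an assumption:

    struct itree_node {                 /* illustrative layout */
        struct rb_node rb;
        uint64_t start, last;           /* inclusive interval */
    };

    static struct itree_node *
    itree_iter_next(struct itree_node *node, uint64_t start, uint64_t last)
    {
        struct rb_node *rb = rb_next(&node->rb);

        while (rb) {
            struct itree_node *next =
                rb_entry(rb, struct itree_node, rb);

            if (next->start > last)     /* sorted by start: we're done */
                return NULL;
            if (next->last >= start)    /* overlaps the query window */
                return next;
            rb = rb_next(rb);           /* ends before the window: skip */
        }
        return NULL;
    }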
|
| /openbsd-src/sys/dev/pci/drm/amd/amdkfd/ |
| H A D | kfd_svm.c |
    2686  struct rb_node *rb_node;  in svm_range_get_range_boundaries()
    2706  rb_node = rb_prev(&node->rb);
    2711  rb_node = rb_last(&p->svms.objects.rb_root);  in svm_range_check_vm_userptr()
    2713  if (rb_node) {  in svm_range_check_vm_userptr()
    2714  node = container_of(rb_node, struct interval_tree_node, rb);  in svm_range_check_vm_userptr()
    2757  mapping = container_of((struct rb_node *)node,
    3265  mapping = container_of((struct rb_node *)node,
    2660  struct rb_node *rb_node;  svm_range_get_range_boundaries() local
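`svm_range_get_range_boundaries()` probes the neighbors of an interval-tree node with `rb_prev()`/`rb_last()` to clamp where a new SVM range may grow. An illustrative left-neighbor clamp; the helper name and parameters here are hypothetical:

    /* clamp a candidate start so it does not overlap the left neighbor;
     * interval_tree_node has inclusive [start, last] endpoints */
    static uint64_t
    clamp_start_after_prev(struct interval_tree_node *node, uint64_t floor)
    {
        struct rb_node *rb_node = rb_prev(&node->rb);

        if (rb_node) {
            struct interval_tree_node *prev =
                container_of(rb_node, struct interval_tree_node, rb);

            if (prev->last + 1 > floor)
                floor = prev->last + 1;
        }
        return floor;
    }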
|