Lines Matching defs:node

79  * steep cliff not a real concern. Removing a node again is O(1).
107 static noinline void save_stack(struct drm_mm_node *node)
115 node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
120 struct drm_mm_node *node;
129 list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
130 if (!node->stack) {
131 DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
132 node->start, node->size);
136 nr_entries = stack_depot_fetch(node->stack, &entries);
138 DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
139 node->start, node->size, buf);
148 static void save_stack(struct drm_mm_node *node) { }
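The CONFIG_DRM_DEBUG_MM fragments above (save_stack() and show_leaks()) record, for every inserted node, the allocating call chain in the stack depot so that nodes still present at takedown can be blamed on their caller. A minimal sketch of the same pattern, assuming a kernel configured with stack depot support; the my_* names are illustrative and not part of drm_mm:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define MY_STACKDEPTH 32

/* Capture the current call chain, skipping this helper itself. */
static depot_stack_handle_t my_save_stack(void)
{
	unsigned long entries[MY_STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT);
}

/* Pretty-print a previously saved call chain into @buf. */
static void my_print_owner(depot_stack_handle_t handle, char *buf, size_t sz)
{
	unsigned long *entries;
	unsigned int nr_entries;

	if (!handle) {
		pr_err("unknown owner\n");
		return;
	}

	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_snprint(buf, sz, entries, nr_entries, 0);
	pr_err("inserted at\n%s", buf);
}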
152 #define START(node) ((node)->start)
153 #define LAST(node) ((node)->start + (node)->size - 1)
164 struct drm_mm_node *node;
168 node = rb_entry(rb, typeof(*node), rb);
169 if (LAST(node) >= start && START(node) <= last)
170 return node;
176 drm_mm_interval_tree_remove(struct drm_mm_node *node,
179 rb_erase_cached(&node->rb, root);
192 struct drm_mm_node *node)
199 node->__subtree_last = LAST(node);
205 if (parent->__subtree_last >= node->__subtree_last)
208 parent->__subtree_last = node->__subtree_last;
224 if (parent->__subtree_last < node->__subtree_last)
225 parent->__subtree_last = node->__subtree_last;
226 if (node->start < parent->start) {
234 rb_link_node(&node->rb, rb, link);
236 rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
239 rb_insert_color_cached(&node->rb, &mm->interval_tree, leftmost);
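The START()/LAST() macros and the augmented rbtree insert/remove above make up the interval tree that answers "which nodes overlap this range?". Drivers normally reach it through the iterator exported by drm_mm.h; a minimal sketch, assuming an initialised and populated struct drm_mm (my_log_conflicts is a hypothetical helper name):

#include <drm/drm_mm.h>
#include <linux/printk.h>

/* Hypothetical helper: report every allocated node overlapping [start, end). */
static void my_log_conflicts(struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *node;

	/* Backed by __drm_mm_interval_first() and the tree shown above. */
	drm_mm_for_each_node_in_range(node, mm, start, end)
		pr_info("conflict: [%llx + %llx]\n", node->start, node->size);
}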
245 u64 x = expr(node); \
253 rb_link_node(&node->member, rb, link); \
254 rb_insert_color(&node->member, &root); \
266 struct drm_mm_node *node)
269 u64 x = node->hole_size;
282 rb_link_node(&node->rb_hole_size, rb, link);
283 rb_insert_color_cached(&node->rb_hole_size, root, first);
286 static void add_hole(struct drm_mm_node *node)
288 struct drm_mm *mm = node->mm;
290 node->hole_size =
291 __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
292 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
294 insert_hole_size(&mm->holes_size, node);
297 list_add(&node->hole_stack, &mm->hole_stack);
300 static void rm_hole(struct drm_mm_node *node)
302 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
304 list_del(&node->hole_stack);
305 rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
306 rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
307 node->hole_size = 0;
309 DRM_MM_BUG_ON(drm_mm_hole_follows(node));
333 struct drm_mm_node *node =
336 if (size <= node->hole_size) {
337 best = node;
350 struct drm_mm_node *node = NULL;
355 node = rb_hole_addr_to_node(rb);
356 hole_start = __drm_mm_hole_node_start(node);
359 rb = node->rb_hole_addr.rb_left;
360 else if (addr > hole_start + node->hole_size)
361 rb = node->rb_hole_addr.rb_right;
366 return node;
394 struct drm_mm_node *node,
400 return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
403 return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
406 return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
409 node = list_next_entry(node, hole_stack);
410 return &node->hole_stack == &mm->hole_stack ? NULL : node;
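add_hole()/rm_hole() together with find_hole()/next_hole() above keep free space visible in three forms: the hole_stack list of recently freed holes, the holes_size rbtree and the holes_addr rbtree. Callers normally only observe the result through the public hole iterator; a minimal sketch, assuming an initialised struct drm_mm (my_largest_hole is an illustrative name):

#include <drm/drm_mm.h>

/* Hypothetical helper: return the size of the largest free hole in @mm. */
static u64 my_largest_hole(struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 hole_start, hole_end, best = 0;

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end) {
		if (hole_end - hole_start > best)
			best = hole_end - hole_start;
	}

	return best;
}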
415 * drm_mm_reserve_node - insert a pre-initialized node
416 * @mm: drm_mm allocator to insert @node into
417 * @node: drm_mm_node to insert
426 * 0 on success, -ENOSPC if there's no hole where @node is.
428 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
435 end = node->start + node->size;
436 if (unlikely(end <= node->start))
439 /* Find the relevant hole to add our node to */
440 hole = find_hole(mm, node->start);
448 mm->color_adjust(hole, node->color, &adj_start, &adj_end);
450 if (adj_start > node->start || adj_end < end)
453 node->mm = mm;
455 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
456 list_add(&node->node_list, &hole->node_list);
457 drm_mm_interval_tree_add_node(hole, node);
458 node->hole_size = 0;
461 if (node->start > hole_start)
464 add_hole(node);
466 save_stack(node);
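drm_mm_reserve_node(), whose body the lines above come from, is the path for claiming a block whose offset is already fixed, e.g. a range carved out by firmware. A hedged usage sketch; locking around the allocator is the caller's responsibility and the my_* names are illustrative:

#include <drm/drm_mm.h>
#include <linux/printk.h>
#include <linux/string.h>

static int my_reserve_fixed_range(struct drm_mm *my_mm,
				  struct drm_mm_node *my_node,
				  u64 offset, u64 size)
{
	int ret;

	/*
	 * start, size (and optionally color) are set by the caller;
	 * everything else must be zero before the first insertion.
	 */
	memset(my_node, 0, sizeof(*my_node));
	my_node->start = offset;
	my_node->size = size;

	ret = drm_mm_reserve_node(my_mm, my_node);
	if (ret == -ENOSPC)
		pr_warn("range [%llx + %llx] is already occupied\n",
			offset, size);

	return ret;
}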
477 * drm_mm_insert_node_in_range - ranged search for space and insert @node
479 * @node: preallocated node to insert
482 * @color: opaque tag value to use for this node
483 * @range_start: start of the allowed range for this node
484 * @range_end: end of the allowed range for this node
487 * The preallocated @node must be cleared to 0.
493 struct drm_mm_node * const node,
568 node->mm = mm;
569 node->size = size;
570 node->start = adj_start;
571 node->color = color;
572 node->hole_size = 0;
574 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
575 list_add(&node->node_list, &hole->node_list);
576 drm_mm_interval_tree_add_node(hole, node);
582 add_hole(node);
584 save_stack(node);
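The lines above are the tail of drm_mm_insert_node_in_range(), where the allocator writes the offset it found into the node. From the calling side, a hedged sketch; PAGE_SIZE alignment, color 0 and DRM_MM_INSERT_BEST are example choices rather than requirements, and the my_* names are illustrative:

#include <drm/drm_mm.h>
#include <linux/mm.h>
#include <linux/string.h>

static int my_alloc_in_aperture(struct drm_mm *my_mm,
				struct drm_mm_node *my_node,
				u64 size, u64 aperture_end)
{
	/* The node must come in zeroed; on success drm_mm fills in start. */
	memset(my_node, 0, sizeof(*my_node));

	return drm_mm_insert_node_in_range(my_mm, my_node,
					   size,
					   PAGE_SIZE,		/* alignment */
					   0,			/* color */
					   0, aperture_end,	/* range */
					   DRM_MM_INSERT_BEST);
}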
592 static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
594 return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
598 * drm_mm_remove_node - Remove a memory node from the allocator.
599 * @node: drm_mm_node to remove
601 * This just removes a node from its drm_mm allocator. The node does not need to
603 * allocator. It is a bug to call this function on an unallocated node.
605 void drm_mm_remove_node(struct drm_mm_node *node)
607 struct drm_mm *mm = node->mm;
610 DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
611 DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
613 prev_node = list_prev_entry(node, node_list);
615 if (drm_mm_hole_follows(node))
616 rm_hole(node);
618 drm_mm_interval_tree_remove(node, &mm->interval_tree);
619 list_del(&node->node_list);
625 clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
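drm_mm_remove_node() pairs with either insertion path; as the lines above show, it only unlinks the node and re-creates the surrounding hole, so the caller keeps ownership of the node's storage and may re-insert it without clearing it again. A minimal teardown sketch (my_release_range is an illustrative name):

#include <drm/drm_mm.h>

static void my_release_range(struct drm_mm_node *my_node)
{
	/* Removing an unallocated node is a bug, so guard the teardown path. */
	if (drm_mm_node_allocated(my_node))
		drm_mm_remove_node(my_node);
}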
690 * since freeing a node is also O(1) the overall complexity is
748 * drm_mm_scan_add_block - add a node to the scan list
750 * @node: drm_mm_node to add
752 * Add a node to the scan list that might be freed to make space for the desired
759 struct drm_mm_node *node)
767 DRM_MM_BUG_ON(node->mm != mm);
768 DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
769 DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
770 __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
774 * (distance between the end of our previous node and the start of
778 hole = list_prev_entry(node, node_list);
779 DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
780 __list_del_entry(&node->node_list);
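drm_mm_scan_add_block() above temporarily unlinks each candidate from the node list so that adjacent candidates coalesce into one prospective hole. A hedged sketch of the roster-building half of an eviction loop, assuming driver objects that embed a drm_mm_node and a scan the caller has already set up with drm_mm_scan_init() or drm_mm_scan_init_with_range(); struct my_obj and the helper name are illustrative:

#include <drm/drm_mm.h>
#include <linux/list.h>

struct my_obj {
	struct drm_mm_node node;	/* hypothetical driver object */
	struct list_head bound_link;	/* on the driver's bound list */
	struct list_head evict_link;	/* on the temporary roster */
};

/* Add bound objects to @scan until they cover a hole of the wanted size. */
static bool my_build_roster(struct drm_mm_scan *scan,
			    struct list_head *bound,
			    struct list_head *roster)
{
	struct my_obj *obj;

	list_for_each_entry(obj, bound, bound_link) {
		list_add(&obj->evict_link, roster);
		/* True once the scanned blocks add up to a suitable hole. */
		if (drm_mm_scan_add_block(scan, &obj->node))
			return true;
	}

	return false;
}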
831 * drm_mm_scan_remove_block - remove a node from the scan list
833 * @node: drm_mm_node to remove
850 struct drm_mm_node *node)
854 DRM_MM_BUG_ON(node->mm != scan->mm);
855 DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
856 __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
858 DRM_MM_BUG_ON(!node->mm->scan_active);
859 node->mm->scan_active--;
861 /* During drm_mm_scan_add_block() we decoupled this node leaving
865 * backwards correctly we check that prev_node->next == node->next,
866 * i.e. both believe the same node should be on the other side of the
869 prev_node = list_prev_entry(node, node_list);
871 list_next_entry(node, node_list));
872 list_add(&node->node_list, &prev_node->node_list);
874 return (node->start + node->size > scan->hit_start &&
875 node->start < scan->hit_end);
888 * A node to evict, or NULL if there are no overlapping nodes.
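Counterpart to the roster sketch after drm_mm_scan_add_block() above: every scanned node has to be handed back through drm_mm_scan_remove_block() before the allocator is touched again, in LIFO order relative to the add calls, which a roster built with list_add() yields for free when walked forwards. Only the nodes the call flags actually overlap the chosen hole and need evicting; my_evict_obj() is a hypothetical unbind-and-free callback and struct my_obj is the one sketched above:

#include <drm/drm_mm.h>
#include <linux/list.h>

void my_evict_obj(struct my_obj *obj);	/* hypothetical: unbind + free */

/* Unwind the roster built above; evict only the nodes the scan flags. */
static void my_unwind_roster(struct drm_mm_scan *scan, struct list_head *roster)
{
	struct my_obj *obj, *next;

	list_for_each_entry_safe(obj, next, roster, evict_link) {
		bool evict = drm_mm_scan_remove_block(scan, &obj->node);

		list_del(&obj->evict_link);
		if (evict)
			my_evict_obj(obj);
	}

	/*
	 * A neighbouring node may still have to go to satisfy color_adjust;
	 * drm_mm_scan_color_evict() returns it, or NULL as documented above.
	 */
}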