
Commit 202b52b

ickle authored and danvet committed
drm: Track drm_mm nodes with an interval tree
In addition to the last-in/first-out stack for accessing drm_mm nodes, we
occasionally, and in the future often, want to find a drm_mm_node by an
address. To do so efficiently we need to track the nodes in an interval
tree - lookups for a particular address will then be O(lg(N)), where N is
the number of nodes in the range manager, as opposed to O(N). Insertion,
however, gains an extra O(lg(N)) step for all nodes, irrespective of
whether the interval tree is in use. For future i915 patches, eliminating
the linear walk is a significant improvement.

v2: Use generic interval-tree template for u64 and faster insertion.

Signed-off-by: Chris Wilson <[email protected]>
Cc: David Herrmann <[email protected]>
Cc: [email protected]
Reviewed-by: David Herrmann <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
Link: http://patchwork.freedesktop.org/patch/msgid/[email protected]
1 parent 1d77685 commit 202b52b

File tree: 2 files changed (+122 -23 lines)
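Before the diff itself, a note on the mechanism: the change below instantiates INTERVAL_TREE_DEFINE() from <linux/interval_tree_generic.h>, a template macro that expands into a family of static inline helpers named after the supplied prefix. As rough orientation, the generated signatures look approximately like this; only _iter_first, _iter_next and _remove are actually referenced by this commit, and the _insert helper shown is an assumption about the template, not something this diff uses:

	/* Approximate expansion of INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
	 * u64, __subtree_last, START, LAST, static inline, drm_mm_interval_tree).
	 * Sketch only; see <linux/interval_tree_generic.h> for the real thing. */
	static inline void
	drm_mm_interval_tree_insert(struct drm_mm_node *node, struct rb_root *root);
	static inline void
	drm_mm_interval_tree_remove(struct drm_mm_node *node, struct rb_root *root);
	static inline struct drm_mm_node *
	drm_mm_interval_tree_iter_first(struct rb_root *root, u64 start, u64 last);
	static inline struct drm_mm_node *
	drm_mm_interval_tree_iter_next(struct drm_mm_node *node, u64 start, u64 last);

The START and LAST macros in the diff define each node's interval as the closed range [start, start + size - 1].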

drivers/gpu/drm/drm_mm.c

Lines changed: 110 additions & 23 deletions
@@ -46,6 +46,7 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/export.h>
+#include <linux/interval_tree_generic.h>
 
 /**
  * DOC: Overview
@@ -103,6 +104,72 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 						u64 end,
 						enum drm_mm_search_flags flags);
 
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->start + (node)->size - 1)
+
+INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+		     u64, __subtree_last,
+		     START, LAST, static inline, drm_mm_interval_tree)
+
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+{
+	return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+					       start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_first);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
+{
+	return drm_mm_interval_tree_iter_next(node, start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_next);
+
+static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
+					  struct drm_mm_node *node)
+{
+	struct drm_mm *mm = hole_node->mm;
+	struct rb_node **link, *rb;
+	struct drm_mm_node *parent;
+
+	node->__subtree_last = LAST(node);
+
+	if (hole_node->allocated) {
+		rb = &hole_node->rb;
+		while (rb) {
+			parent = rb_entry(rb, struct drm_mm_node, rb);
+			if (parent->__subtree_last >= node->__subtree_last)
+				break;
+
+			parent->__subtree_last = node->__subtree_last;
+			rb = rb_parent(rb);
+		}
+
+		rb = &hole_node->rb;
+		link = &hole_node->rb.rb_right;
+	} else {
+		rb = NULL;
+		link = &mm->interval_tree.rb_node;
+	}
+
+	while (*link) {
+		rb = *link;
+		parent = rb_entry(rb, struct drm_mm_node, rb);
+		if (parent->__subtree_last < node->__subtree_last)
+			parent->__subtree_last = node->__subtree_last;
+		if (node->start < parent->start)
+			link = &parent->rb.rb_left;
+		else
+			link = &parent->rb.rb_right;
+	}
+
+	rb_link_node(&node->rb, rb, link);
+	rb_insert_augmented(&node->rb,
+			    &mm->interval_tree,
+			    &drm_mm_interval_tree_augment);
+}
+
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 u64 size, unsigned alignment,
@@ -153,6 +220,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	drm_mm_interval_tree_add_node(hole_node, node);
+
 	BUG_ON(node->start + node->size > adj_end);
 
 	node->hole_follows = 0;
@@ -178,41 +247,52 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
  */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
+	u64 end = node->start + node->size;
 	struct drm_mm_node *hole;
-	u64 end;
-	u64 hole_start;
-	u64 hole_end;
-
-	BUG_ON(node == NULL);
+	u64 hole_start, hole_end;
 
 	end = node->start + node->size;
 
 	/* Find the relevant hole to add our node to */
-	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-		if (hole_start > node->start || hole_end < end)
-			continue;
+	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+					       node->start, ~(u64)0);
+	if (hole) {
+		if (hole->start < end)
+			return -ENOSPC;
+	} else {
+		hole = list_entry(&mm->head_node.node_list,
+				  typeof(*hole), node_list);
+	}
 
-		node->mm = mm;
-		node->allocated = 1;
+	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+	if (!hole->hole_follows)
+		return -ENOSPC;
 
-		INIT_LIST_HEAD(&node->hole_stack);
-		list_add(&node->node_list, &hole->node_list);
+	hole_start = __drm_mm_hole_node_start(hole);
+	hole_end = __drm_mm_hole_node_end(hole);
+	if (hole_start > node->start || hole_end < end)
+		return -ENOSPC;
 
-		if (node->start == hole_start) {
-			hole->hole_follows = 0;
-			list_del_init(&hole->hole_stack);
-		}
+	node->mm = mm;
+	node->allocated = 1;
 
-		node->hole_follows = 0;
-		if (end != hole_end) {
-			list_add(&node->hole_stack, &mm->hole_stack);
-			node->hole_follows = 1;
-		}
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole->node_list);
 
-		return 0;
+	drm_mm_interval_tree_add_node(hole, node);
+
+	if (node->start == hole_start) {
+		hole->hole_follows = 0;
+		list_del_init(&hole->hole_stack);
+	}
+
+	node->hole_follows = 0;
+	if (end != hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
 	}
 
-	return -ENOSPC;
+	return 0;
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
@@ -302,6 +382,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	drm_mm_interval_tree_add_node(hole_node, node);
+
 	BUG_ON(node->start < start);
 	BUG_ON(node->start < adj_start);
 	BUG_ON(node->start + node->size > adj_end);
@@ -390,6 +472,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
 	} else
 		list_move(&prev_node->hole_stack, &mm->hole_stack);
 
+	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 	list_del(&node->node_list);
 	node->allocated = 0;
 }
@@ -516,11 +599,13 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
 	list_replace(&old->node_list, &new->node_list);
 	list_replace(&old->hole_stack, &new->hole_stack);
+	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
 	new->hole_follows = old->hole_follows;
 	new->mm = old->mm;
 	new->start = old->start;
 	new->size = old->size;
 	new->color = old->color;
+	new->__subtree_last = old->__subtree_last;
 
 	old->allocated = 0;
 	new->allocated = 1;
@@ -758,6 +843,8 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
 	mm->head_node.size = start - mm->head_node.start;
 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
+	mm->interval_tree = RB_ROOT;
+
 	mm->color_adjust = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init);
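For context on how the two new exported helpers are meant to be consumed, here is a hypothetical driver-side sketch, not code from this commit; mm, addr and range are illustrative names. It visits every node overlapping the closed range [addr, addr + range - 1]:

	struct drm_mm_node *node;

	/* Each step descends the augmented rbtree, so the walk costs
	 * O(lg(N)) per lookup rather than a linear scan of the node list. */
	for (node = drm_mm_interval_first(mm, addr, addr + range - 1);
	     node;
	     node = drm_mm_interval_next(node, addr, addr + range - 1))
		pr_info("drm_mm node [%llx + %llx]\n",
			(unsigned long long)node->start,
			(unsigned long long)node->size);

This is the access pattern the commit message anticipates for the later i915 patches: resolving an address to a drm_mm_node without walking the whole allocation list.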

include/drm/drm_mm.h

Lines changed: 12 additions & 0 deletions
@@ -37,6 +37,7 @@
  * Generic range manager structs
  */
 #include <linux/bug.h>
+#include <linux/rbtree.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -61,6 +62,7 @@ enum drm_mm_allocator_flags {
 struct drm_mm_node {
 	struct list_head node_list;
 	struct list_head hole_stack;
+	struct rb_node rb;
 	unsigned hole_follows : 1;
 	unsigned scanned_block : 1;
 	unsigned scanned_prev_free : 1;
@@ -70,6 +72,7 @@ struct drm_mm_node {
 	unsigned long color;
 	u64 start;
 	u64 size;
+	u64 __subtree_last;
 	struct drm_mm *mm;
 };
 
@@ -79,6 +82,9 @@ struct drm_mm {
 	/* head_node.node_list is the list of all memory nodes, ordered
 	 * according to the (increasing) start address of the memory node. */
 	struct drm_mm_node head_node;
+	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
+	struct rb_root interval_tree;
+
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
@@ -295,6 +301,12 @@ void drm_mm_init(struct drm_mm *mm,
 void drm_mm_takedown(struct drm_mm *mm);
 bool drm_mm_clean(struct drm_mm *mm);
 
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);
+
 void drm_mm_init_scan(struct drm_mm *mm,
 		      u64 size,
 		      unsigned alignment,
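One detail worth calling out in the header additions: the tree stores closed intervals, so a node's last valid address is start + size - 1, per the LAST() macro in the .c file. A small illustrative sketch, with made-up addresses and assuming a single node at 0x1000 of size 0x1000:

	struct drm_mm_node *n;

	/* The node covers [0x1000, 0x1fff], so a query on its final byte
	 * finds it, while a query one past the end does not. */
	n = drm_mm_interval_first(mm, 0x1fff, 0x1fff); /* returns the node */
	n = drm_mm_interval_first(mm, 0x2000, 0x2000); /* returns NULL */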
