blob: f98032c135c6eb610c3c651b0c0c7c49908509bb [file] [log] [blame]
Frederic Weisbecker6baa0a52009-08-14 12:21:53 +02001#include "../perf.h"
2#include <stdlib.h>
3#include <stdio.h>
4#include <string.h>
5#include "thread.h"
6#include "util.h"
Frederic Weisbecker6e086432009-08-18 17:04:03 +02007#include "debug.h"
Frederic Weisbecker6baa0a52009-08-14 12:21:53 +02008
9static struct thread *thread__new(pid_t pid)
10{
11 struct thread *self = malloc(sizeof(*self));
12
13 if (self != NULL) {
14 self->pid = pid;
15 self->comm = malloc(32);
16 if (self->comm)
17 snprintf(self->comm, 32, ":%d", self->pid);
18 INIT_LIST_HEAD(&self->maps);
19 }
20
21 return self;
22}
23
24int thread__set_comm(struct thread *self, const char *comm)
25{
26 if (self->comm)
27 free(self->comm);
28 self->comm = strdup(comm);
29 return self->comm ? 0 : -ENOMEM;
30}
31
32static size_t thread__fprintf(struct thread *self, FILE *fp)
33{
34 struct map *pos;
35 size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
36
37 list_for_each_entry(pos, &self->maps, node)
38 ret += map__fprintf(pos, fp);
39
40 return ret;
41}
42
/*
 * Look up the thread with the given @pid in the @threads rbtree,
 * allocating and inserting a new one if it is not there yet.
 *
 * @last_match caches the most recently returned thread and is updated
 * on every hit or insert.  Returns NULL only when a new thread had to
 * be created and thread__new() failed.
 */
struct thread *
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
{
	struct rb_node **p = &threads->rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we dont have to look up
	 * the full rbtree:
	 */
	if (*last_match && (*last_match)->pid == pid)
		return *last_match;

	/* Standard rbtree descent; remembers the insertion point on a miss. */
	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			*last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* Not found: create a new thread and link it where the walk ended. */
	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, threads);
		*last_match = th;
	}

	return th;
}
82
/*
 * Add @map to @self's map list, trimming or deleting any existing maps
 * whose address ranges overlap it, so the list stays non-overlapping.
 */
void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	/* _safe variant: @pos may be deleted while iterating. */
	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			if (verbose >= 2) {
				printf("overlapping maps:\n");
				map__fprintf(map, stdout);
				map__fprintf(pos, stdout);
			}

			/* New map covers the front of @pos: push its start up. */
			if (map->start <= pos->start && map->end > pos->start)
				pos->start = map->end;

			/* New map covers the back of @pos: pull its end down. */
			if (map->end >= pos->end && map->start < pos->end)
				pos->end = map->start;

			if (verbose >= 2) {
				printf("after collision:\n");
				map__fprintf(pos, stdout);
			}

			/* @pos was swallowed entirely: unlink and free it. */
			if (pos->start >= pos->end) {
				list_del_init(&pos->node);
				free(pos);
			}
		}
	}

	list_add_tail(&map->node, &self->maps);
}
115
116int thread__fork(struct thread *self, struct thread *parent)
117{
118 struct map *map;
119
120 if (self->comm)
121 free(self->comm);
122 self->comm = strdup(parent->comm);
123 if (!self->comm)
124 return -ENOMEM;
125
126 list_for_each_entry(map, &parent->maps, node) {
127 struct map *new = map__clone(map);
128 if (!new)
129 return -ENOMEM;
130 thread__insert_map(self, new);
131 }
132
133 return 0;
134}
135
136struct map *thread__find_map(struct thread *self, u64 ip)
137{
138 struct map *pos;
139
140 if (self == NULL)
141 return NULL;
142
143 list_for_each_entry(pos, &self->maps, node)
144 if (ip >= pos->start && ip <= pos->end)
145 return pos;
146
147 return NULL;
148}
149
150size_t threads__fprintf(FILE *fp, struct rb_root *threads)
151{
152 size_t ret = 0;
153 struct rb_node *nd;
154
155 for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
156 struct thread *pos = rb_entry(nd, struct thread, rb_node);
157
158 ret += thread__fprintf(pos, fp);
159 }
160
161 return ret;
162}