#include "../perf.h"
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "thread.h"
#include "util.h"

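/*
 * Allocate a thread for @pid. The comm starts out as a ":%d"
 * placeholder built from the pid, and stays NULL if its allocation
 * fails. Returns NULL when the thread itself cannot be allocated.
 */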
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}

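/*
 * Replace the thread's comm with a copy of @comm. Returns 0 on
 * success or -ENOMEM if strdup() fails, in which case the old comm
 * has already been freed and ->comm is left NULL.
 */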
int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}

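/*
 * Print a one-line thread header followed by each of its maps.
 * Returns the total number of characters written.
 */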
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}

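/*
 * Look up the thread for @pid in the rbtree, creating and inserting
 * it if it does not exist yet. @last_match is a one-entry cache owned
 * by the caller that short-circuits the tree walk when consecutive
 * lookups hit the same pid. Returns NULL only if thread__new() fails.
 */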
struct thread *
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
{
	struct rb_node **p = &threads->rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (*last_match && (*last_match)->pid == pid)
		return *last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			*last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, threads);
		*last_match = th;
	}

	return th;
}

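/*
 * Add @map to the thread's map list, first removing (and freeing) any
 * existing map that overlaps it. Note the XXX below: the dsos hanging
 * off the removed maps are leaked.
 */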
void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			list_del_init(&pos->node);
			/* XXX leaks dsos */
			free(pos);
		}
	}

	list_add_tail(&map->node, &self->maps);
}

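/*
 * Initialize @self as a child of @parent: copy the parent's comm and
 * clone each of its maps. Returns -ENOMEM on any allocation failure,
 * possibly leaving @self with only part of the parent's maps.
 */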
int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}

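/*
 * Return the map covering @ip in this thread's address space, or NULL
 * if @self is NULL or no map contains the address. Both the start and
 * end addresses are treated as inclusive.
 */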
struct map *thread__find_map(struct thread *self, u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}

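/*
 * Dump every thread in the tree, in pid order thanks to the rbtree
 * walk, returning the total number of characters written.
 */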
size_t threads__fprintf(FILE *fp, struct rb_root *threads)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
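
/*
 * Usage sketch (hypothetical caller, not part of this file): resolve
 * a sampled pid/ip pair to a thread and a map. The rb_root and the
 * last_match cache live in the caller, e.g.:
 *
 *	static struct rb_root threads = RB_ROOT;
 *	static struct thread *last_match;
 *
 *	struct thread *th = threads__findnew(pid, &threads, &last_match);
 *	struct map *map = th ? thread__find_map(th, ip) : NULL;
 */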