blob: d9cc1ced324261def43d95195441ca4f5c561c20 [file] [log] [blame]
Upstreamcc2ee171970-01-12 13:46:40 +00001/**
2 * @file db_manage.c
3 * Management of a DB file
4 *
5 * @remark Copyright 2002 OProfile authors
6 * @remark Read the file COPYING
7 *
8 * @author Philippe Elie
9 */
10
11#define _GNU_SOURCE
12
13#include <stdlib.h>
The Android Open Source Project48ae5fc2008-10-21 07:00:00 -070014#include <fcntl.h>
Upstreamcc2ee171970-01-12 13:46:40 +000015#include <sys/mman.h>
16#include <sys/types.h>
17#include <sys/stat.h>
The Android Open Source Project48ae5fc2008-10-21 07:00:00 -070018#include <fcntl.h>
Upstreamcc2ee171970-01-12 13:46:40 +000019#include <unistd.h>
20#include <errno.h>
21#include <string.h>
22#include <stdio.h>
23
24#include "odb.h"
25#include "op_string.h"
26#include "op_libiberty.h"
27
28
29static __inline odb_descr_t * odb_to_descr(odb_data_t * data)
30{
31 return (odb_descr_t *)(((char*)data->base_memory) + data->sizeof_header);
32}
33
34
35static __inline odb_node_t * odb_to_node_base(odb_data_t * data)
36{
37 return (odb_node_t *)(((char *)data->base_memory) + data->offset_node);
38}
39
40
41static __inline odb_index_t * odb_to_hash_base(odb_data_t * data)
42{
43 return (odb_index_t *)(((char *)data->base_memory) +
44 data->offset_node +
45 (data->descr->size * sizeof(odb_node_t)));
46}
47
48
49/**
50 * return the number of bytes used by hash table, node table and header.
51 */
52static unsigned int tables_size(odb_data_t const * data, odb_node_nr_t node_nr)
53{
54 size_t size;
55
56 size = node_nr * (sizeof(odb_index_t) * BUCKET_FACTOR);
57 size += node_nr * sizeof(odb_node_t);
58 size += data->offset_node;
59
60 return size;
61}
62
63
/**
 * Reserve a slot in the node table, growing the file when full.
 * Returns the index of the newly reserved node, or ODB_NODE_NR_INVALID
 * if the file could not be grown (ftruncate or mremap failure).
 *
 * Growing doubles descr->size: the file is extended with ftruncate,
 * the mapping enlarged with mremap (may move, so every cached pointer
 * into the mapping is recomputed), then the hash table is rebuilt.
 */
odb_index_t odb_hash_add_node(odb_t * odb)
{
	odb_data_t * data = odb->data;

	if (data->descr->current_size >= data->descr->size) {
		unsigned int old_file_size;
		unsigned int new_file_size;
		unsigned int pos;
		void * new_map;

		old_file_size = tables_size(data, data->descr->size);
		new_file_size = tables_size(data, data->descr->size * 2);

		/* grow the backing file first so the enlarged mapping is valid */
		if (ftruncate(data->fd, new_file_size))
			return ODB_NODE_NR_INVALID;

		new_map = mremap(data->base_memory,
				old_file_size, new_file_size, MREMAP_MAYMOVE);

		if (new_map == MAP_FAILED)
			return ODB_NODE_NR_INVALID;

		/* the mapping may have moved: refresh every derived pointer */
		data->base_memory = new_map;
		data->descr = odb_to_descr(data);
		data->descr->size *= 2;
		data->node_base = odb_to_node_base(data);
		data->hash_base = odb_to_hash_base(data);
		data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;

		/* Rebuild the hash table; node zero is never used. No
		 * explicit zeroing of the new bucket array is needed:
		 * the layout is node table then hash table, the table
		 * size doubles on each grow, and sizeof(odb_node_t) >
		 * BUCKET_FACTOR * sizeof(odb_index_t), so the new hash
		 * table lies entirely inside the freshly grown region,
		 * which ftruncate guarantees is zero-filled. */
		/* Kept for reference: the explicit zeroing that the
		 * argument above makes unnecessary (avoiding the writes
		 * also avoids faulting in those pages). */
#if 0
		for (pos = 0 ; pos < data->descr->size*BUCKET_FACTOR ; ++pos) {
			data->hash_base[pos] = 0;
		}
#endif

		/* re-insert every live node (1..current_size-1) at the
		 * head of its bucket's chain */
		for (pos = 1; pos < data->descr->current_size; ++pos) {
			odb_node_t * node = &data->node_base[pos];
			size_t index = odb_do_hash(data, node->key);
			node->next = data->hash_base[index];
			data->hash_base[index] = pos;
		}
	}

	return (odb_index_t)data->descr->current_size++;
}
124
125
126void odb_init(odb_t * odb)
127{
128 odb->data = NULL;
129}
130
131
/* Default node count for a newly created file. NOTE(review): the
 * offset_node parameter is ignored — 128 is fixed; the historical
 * intent was a size fitting in 4096 bytes. */
#define DEFAULT_NODE_NR(offset_node) 128
/* Bucket count of the open-files table below. */
#define FILES_HASH_SIZE 512

/* Table of currently open DB files keyed by filename hash, so a file
 * opened twice shares one odb_data_t via ref_count (see odb_open). */
static struct list_head files_hash[FILES_HASH_SIZE];
137
138
139static void init_hash()
140{
141 size_t i;
142 for (i = 0; i < FILES_HASH_SIZE; ++i)
143 list_init(&files_hash[i]);
144}
145
146
/**
 * Look up an already-open DB file by filename.
 * @param hash      bucket index, precomputed by the caller as
 *                  op_hash_string(filename) % FILES_HASH_SIZE
 * @param filename  exact name to match (byte-wise strcmp; no path
 *                  canonicalization is done here)
 * Returns the shared odb_data_t, or NULL if the file is not open.
 */
static odb_data_t *
find_samples_data(size_t hash, char const * filename)
{
	struct list_head * pos;

	/* FIXME: maybe an initial init routine ? */
	/* Lazy init: files_hash is a zeroed static, so a NULL next
	 * pointer in bucket 0 means list_init was never run. A freshly
	 * initialized table is necessarily empty, hence return NULL. */
	if (files_hash[0].next == NULL) {
		init_hash();
		return NULL;
	}

	list_for_each(pos, &files_hash[hash]) {
		odb_data_t * entry = list_entry(pos, odb_data_t, list);
		if (strcmp(entry->filename, filename) == 0)
			return entry;
	}

	return NULL;
}
166
167
168int odb_open(odb_t * odb, char const * filename, enum odb_rw rw,
169 size_t sizeof_header)
170{
171 struct stat stat_buf;
172 odb_node_nr_t nr_node;
173 odb_data_t * data;
174 size_t hash;
175 int err = 0;
176
177 int flags = (rw == ODB_RDWR) ? (O_CREAT | O_RDWR) : O_RDONLY;
178 int mmflags = (rw == ODB_RDWR) ? (PROT_READ | PROT_WRITE) : PROT_READ;
179
180 hash = op_hash_string(filename) % FILES_HASH_SIZE;
181 data = find_samples_data(hash, filename);
182 if (data) {
183 odb->data = data;
184 data->ref_count++;
185 return 0;
186 }
187
188 data = xmalloc(sizeof(odb_data_t));
189 memset(data, '\0', sizeof(odb_data_t));
190 list_init(&data->list);
191 data->offset_node = sizeof_header + sizeof(odb_descr_t);
192 data->sizeof_header = sizeof_header;
193 data->ref_count = 1;
194 data->filename = xstrdup(filename);
195
196 data->fd = open(filename, flags, 0644);
197 if (data->fd < 0) {
198 err = errno;
199 goto out;
200 }
201
202 if (fstat(data->fd, &stat_buf)) {
203 err = errno;
204 goto fail;
205 }
206
207 if (stat_buf.st_size == 0) {
208 size_t file_size;
209
210 if (rw == ODB_RDONLY) {
211 err = EIO;
212 goto fail;
213 }
214
215 nr_node = DEFAULT_NODE_NR(data->offset_node);
216
217 file_size = tables_size(data, nr_node);
218 if (ftruncate(data->fd, file_size)) {
219 err = errno;
220 goto fail;
221 }
222 } else {
223 /* Calculate nr node allowing a sanity check later */
224 nr_node = (stat_buf.st_size - data->offset_node) /
225 ((sizeof(odb_index_t) * BUCKET_FACTOR) + sizeof(odb_node_t));
226 }
227
228 data->base_memory = mmap(0, tables_size(data, nr_node), mmflags,
229 MAP_SHARED, data->fd, 0);
230
231 if (data->base_memory == MAP_FAILED) {
232 err = errno;
233 goto fail;
234 }
235
236 data->descr = odb_to_descr(data);
237
238 if (stat_buf.st_size == 0) {
239 data->descr->size = nr_node;
240 /* page zero is not used */
241 data->descr->current_size = 1;
242 } else {
243 /* file already exist, sanity check nr node */
244 if (nr_node != data->descr->size) {
245 err = EINVAL;
246 goto fail_unmap;
247 }
248 }
249
250 data->hash_base = odb_to_hash_base(data);
251 data->node_base = odb_to_node_base(data);
252 data->hash_mask = (data->descr->size * BUCKET_FACTOR) - 1;
253
254 list_add(&data->list, &files_hash[hash]);
255 odb->data = data;
256out:
257 return err;
258fail_unmap:
259 munmap(data->base_memory, tables_size(data, nr_node));
260fail:
261 close(data->fd);
262 free(data->filename);
263 free(data);
264 odb->data = NULL;
265 goto out;
266}
267
268
269void odb_close(odb_t * odb)
270{
271 odb_data_t * data = odb->data;
272
273 if (data) {
274 data->ref_count--;
275 if (data->ref_count == 0) {
276 size_t size = tables_size(data, data->descr->size);
277 list_del(&data->list);
278 munmap(data->base_memory, size);
279 if (data->fd >= 0)
280 close(data->fd);
281 free(data->filename);
282 free(data);
283 odb->data = NULL;
284 }
285 }
286}
287
288
289int odb_open_count(odb_t const * odb)
290{
291 if (!odb->data)
292 return 0;
293 return odb->data->ref_count;
294}
295
296
297void * odb_get_data(odb_t * odb)
298{
299 return odb->data->base_memory;
300}
301
302
303void odb_sync(odb_t const * odb)
304{
305 odb_data_t * data = odb->data;
306 size_t size;
307
308 if (!data)
309 return;
310
311 size = tables_size(data, data->descr->size);
312 msync(data->base_memory, size, MS_ASYNC);
313}