// Copyright 2006-2008 Google Inc. All Rights Reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Platform specific code for Linux goes here.

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <stdlib.h>

// Ubuntu Dapper requires memory pages to be marked as executable.
// Otherwise, the OS raises an exception when executing code in that
// page.
#include <sys/types.h>  // mmap & munmap
#include <sys/mman.h>   // mmap & munmap
#include <sys/stat.h>   // open
#include <sys/fcntl.h>  // open
#include <unistd.h>     // getpagesize
#include <execinfo.h>   // backtrace, backtrace_symbols
#include <errno.h>
#include <stdarg.h>

#undef MAP_TYPE

#include "v8.h"

#include "platform.h"


namespace v8 { namespace internal {

// 0 is never a valid thread id on Linux since tids and pids share a
// name space and pid 0 is reserved (see man 2 kill).
static const pthread_t kNoThread = (pthread_t) 0;


double ceiling(double x) {
  return ceil(x);
}


void OS::Setup() {
  // Seed the random number generator.
  srandom(static_cast<unsigned int>(TimeCurrentMillis()));
}


int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
  *secs = usage.ru_utime.tv_sec;
  *usecs = usage.ru_utime.tv_usec;
  return 0;
}


double OS::TimeCurrentMillis() {
  struct timeval tv;
  if (gettimeofday(&tv, NULL) < 0) return 0.0;
  return (static_cast<double>(tv.tv_sec) * 1000) +
         (static_cast<double>(tv.tv_usec) / 1000);
}


int64_t OS::Ticks() {
  // Linux's gettimeofday has microsecond resolution.
  struct timeval tv;
  if (gettimeofday(&tv, NULL) < 0)
    return 0;
  return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
}

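// Note: tm_zone and tm_gmtoff, used by LocalTimezone() and
// LocalTimeOffset() below, are BSD/GNU extensions to struct tm; glibc
// provides them on Linux, but they are not part of ISO C.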
char* OS::LocalTimezone(double time) {
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  return const_cast<char*>(t->tm_zone);
}


double OS::DaylightSavingsOffset(double time) {
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
  struct tm* t = localtime(&tv);
  return t->tm_isdst ? 3600 * msPerSecond : 0;
}


double OS::LocalTimeOffset() {
  // 1199174400 = Jan 1 2008 (UTC).
  // Random date where daylight savings time is not in effect.
  static const int kJan1st2008 = 1199174400;
  time_t tv = static_cast<time_t>(kJan1st2008);
  struct tm* t = localtime(&tv);
  ASSERT(t->tm_isdst <= 0);
  return static_cast<double>(t->tm_gmtoff * msPerSecond);
}


void OS::Print(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrint(format, args);
  va_end(args);
}


void OS::VPrint(const char* format, va_list args) {
  vprintf(format, args);
}


void OS::PrintError(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrintError(format, args);
  va_end(args);
}


void OS::VPrintError(const char* format, va_list args) {
  vfprintf(stderr, format, args);
}


int OS::SNPrintF(char* str, size_t size, const char* format, ...) {
  va_list args;
  va_start(args, format);
  int result = VSNPrintF(str, size, format, args);
  va_end(args);
  return result;
}


int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) {
  return vsnprintf(str, size, format, args);  // forward to linux.
}


double OS::nan_value() { return NAN; }

// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in
// assertions and verification). The estimate is conservative, i.e.,
// not all addresses in 'allocated' space are actually allocated to our
// heap. The range is [lowest, highest), inclusive on the low end and
// exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);


static void UpdateAllocatedSpaceLimits(void* address, int size) {
  lowest_ever_allocated = Min(lowest_ever_allocated, address);
  highest_ever_allocated =
      Max(highest_ever_allocated,
          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}


bool OS::IsOutsideAllocatedSpace(void* address) {
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  return getpagesize();
}

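// Allocates page-aligned memory via mmap. The pages are mapped
// readable, writable and executable so that generated machine code can
// be run directly from them (see the Ubuntu Dapper note near the top of
// this file). The requested size is rounded up to a whole number of
// pages and the actual size is reported back through 'allocated'.
// Example (hypothetical caller):
//   size_t actual;
//   void* block = OS::Allocate(4000, &actual);  // actual is page-rounded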
void* OS::Allocate(const size_t requested, size_t* allocated) {
  const size_t msize = RoundUp(requested, getpagesize());
  void* mbase = mmap(NULL, msize, PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}


void OS::Free(void* buf, const size_t length) {
  // TODO(1240712): munmap has a return value which is ignored here.
  munmap(buf, length);
}


void OS::Sleep(int milliseconds) {
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}


void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  abort();
}


class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
      : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
 private:
  FILE* file_;
  void* memory_;
  int size_;
};

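// Creates the file with fopen(name, "w+"), writes 'size' bytes from
// 'initial' into it, and maps it MAP_SHARED so that updates made
// through the returned memory are carried through to the file. The
// result of mmap() is not checked against MAP_FAILED here.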
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
                                                   void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  fwrite(initial, size, 1, file);
  void* memory =
      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
  return new PosixMemoryMappedFile(file, memory, size);
}


PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) munmap(memory_, size_);
  fclose(file_);
}

#ifdef ENABLE_LOGGING_AND_PROFILING
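// Parses a hexadecimal number; with base 16, strtoll() accepts the
// leading "0x" that the caller prepends. Despite its name the result is
// truncated to 'unsigned', which is enough for the 8-hex-digit
// addresses read from /proc/self/maps below.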
static unsigned StringToLongLong(char* buffer) {
  return static_cast<unsigned>(strtoll(buffer, NULL, 16));
}

#endif

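// Reads /proc/self/maps to log the address ranges of the shared
// libraries mapped into this process. A line of that file looks roughly
// like this (hypothetical example):
//   08048000-0804c000 r-xp 00000000 08:01 12345   /lib/ld-2.3.6.so
// i.e. start-end addresses, permissions, offset, device, inode and
// path. The parser below assumes 8-hex-digit (32-bit) addresses and
// that the file name starts 56 characters after the end address, and it
// only logs mappings that are marked executable.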
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  static const int MAP_LENGTH = 1024;
  int fd = open("/proc/self/maps", O_RDONLY);
  if (fd < 0) return;
  while (true) {
    char addr_buffer[11];
    addr_buffer[0] = '0';
    addr_buffer[1] = 'x';
    addr_buffer[10] = 0;
    read(fd, addr_buffer + 2, 8);
    unsigned start = StringToLongLong(addr_buffer);
    read(fd, addr_buffer + 2, 1);
    if (addr_buffer[2] != '-') return;
    read(fd, addr_buffer + 2, 8);
    unsigned end = StringToLongLong(addr_buffer);
    char buffer[MAP_LENGTH];
    int bytes_read = -1;
    do {
      bytes_read++;
      if (bytes_read > MAP_LENGTH - 1)
        break;
      int result = read(fd, buffer + bytes_read, 1);
      // A result less than 1 means end of file (0) or a read error (-1).
      if (result < 1) return;
    } while (buffer[bytes_read] != '\n');
    buffer[bytes_read] = 0;
    // There are 56 chars to ignore at this point in the line.
    if (bytes_read < 56) continue;
    // Ignore mappings that are not executable.
    if (buffer[3] != 'x') continue;
    buffer[bytes_read] = 0;
    LOG(SharedLibraryEvent(buffer + 56, start, end));
  }
#endif
}

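// Walks the current stack using the glibc backtrace facilities. Note
// that backtrace_symbols() returns a single malloc()ed block, which is
// why it is released with free() below instead of DeleteArray().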
int OS::StackWalk(OS::StackFrame* frames, int frames_size) {
  void** addresses = NewArray<void*>(frames_size);

  int frames_count = backtrace(addresses, frames_size);

  char** symbols;
  symbols = backtrace_symbols(addresses, frames_count);
  if (symbols == NULL) {
    DeleteArray(addresses);
    return kStackWalkError;
  }

  for (int i = 0; i < frames_count; i++) {
    frames[i].address = addresses[i];
    // Format a text representation of the frame based on the information
    // available.
    SNPrintF(frames[i].text, kStackWalkMaxTextLen, "%s", symbols[i]);
    // Make sure line termination is in place.
    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
  }

  DeleteArray(addresses);
  free(symbols);

  return frames_count;
}


// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;

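// VirtualMemory reserves a range of address space without committing
// backing store: the reservation is a PROT_NONE, MAP_NORESERVE mapping;
// Commit() remaps pages readable/writable/executable with MAP_FIXED,
// replacing the reservation in place; Uncommit() maps an inaccessible
// MAP_NORESERVE mapping over the pages again.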
VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
  address_ = mmap(address_hint, size, PROT_NONE,
                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                  kMmapFd, kMmapFdOffset);
  size_ = size;
}


VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != MAP_FAILED;
}


bool VirtualMemory::Commit(void* address, size_t size) {
  if (MAP_FAILED == mmap(address, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                         kMmapFd, kMmapFdOffset)) {
    return false;
  }

  UpdateAllocatedSpaceLimits(address, size);
  return true;
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
              kMmapFd, kMmapFdOffset) != MAP_FAILED;
}


class ThreadHandle::PlatformData : public Malloced {
 public:
  explicit PlatformData(ThreadHandle::Kind kind) {
    Initialize(kind);
  }

  void Initialize(ThreadHandle::Kind kind) {
    switch (kind) {
      case ThreadHandle::SELF: thread_ = pthread_self(); break;
      case ThreadHandle::INVALID: thread_ = kNoThread; break;
    }
  }
  pthread_t thread_;  // Thread handle for pthread.
};


ThreadHandle::ThreadHandle(Kind kind) {
  data_ = new PlatformData(kind);
}


void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
  data_->Initialize(kind);
}


ThreadHandle::~ThreadHandle() {
  delete data_;
}


bool ThreadHandle::IsSelf() const {
  return pthread_equal(data_->thread_, pthread_self());
}


bool ThreadHandle::IsValid() const {
  return data_->thread_ != kNoThread;
}


Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}


Thread::~Thread() {
}


static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // This is also initialized by the first argument to pthread_create() but we
  // don't know which thread will run first (the original thread or the new
  // one) so we initialize it here too.
  thread->thread_handle_data()->thread_ = pthread_self();
  ASSERT(thread->IsValid());
  thread->Run();
  return NULL;
}


void Thread::Start() {
  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
  ASSERT(IsValid());
}


void Thread::Join() {
  pthread_join(thread_handle_data()->thread_, NULL);
}


Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}

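// Mutexes are created recursive (PTHREAD_MUTEX_RECURSIVE): the owning
// thread may lock the mutex again without deadlocking, as long as each
// Lock() is eventually balanced by an Unlock().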
class LinuxMutex : public Mutex {
 public:

  LinuxMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
  }

  virtual ~LinuxMutex() { pthread_mutex_destroy(&mutex_); }

  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

 private:
  pthread_mutex_t mutex_;  // Pthread mutex for POSIX platforms.
};


Mutex* OS::CreateMutex() {
  return new LinuxMutex();
}

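// A counting semaphore based on POSIX sem_t. The second argument to
// sem_init() (pshared) is 0, so the semaphore is only shared between
// the threads of this process, not across processes.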
class LinuxSemaphore : public Semaphore {
 public:
  explicit LinuxSemaphore(int count) { sem_init(&sem_, 0, count); }
  virtual ~LinuxSemaphore() { sem_destroy(&sem_); }

  virtual void Wait() { sem_wait(&sem_); }

  virtual void Signal() { sem_post(&sem_); }

 private:
  sem_t sem_;
};


Semaphore* OS::CreateSemaphore(int count) {
  return new LinuxSemaphore(count);
}

// TODO(1233584): Implement Linux support.
Select::Select(int len, Semaphore** sems) {
  FATAL("Not implemented");
}


Select::~Select() {
  FATAL("Not implemented");
}


int Select::WaitSingle() {
  FATAL("Not implemented");
  return 0;
}


void Select::WaitAll() {
  FATAL("Not implemented");
}

#ifdef ENABLE_LOGGING_AND_PROFILING

static ProfileSampler* active_sampler_ = NULL;

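// The profiler signal handler runs on whichever thread SIGPROF happens
// to be delivered to. It extracts the program counter and stack pointer
// of the interrupted code from the signal's ucontext; the register
// indices are architecture specific, hence the #if below.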
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;

  // Extracting the sample from the context is extremely machine dependent.
  TickSample sample;
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
#if defined(__arm__) || defined(__thumb__)
  sample.pc = mcontext.gregs[R15];
  sample.sp = mcontext.gregs[R13];
#else
  sample.pc = mcontext.gregs[REG_EIP];
  sample.sp = mcontext.gregs[REG_ESP];
#endif
  sample.state = Logger::state();

  if (active_sampler_ == NULL) return;
  active_sampler_->Tick(&sample);
}


class ProfileSampler::PlatformData : public Malloced {
 public:
  PlatformData() {
    signal_handler_installed_ = false;
  }

  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;
  struct itimerval old_timer_value_;
};


ProfileSampler::ProfileSampler(int interval) {
  data_ = new PlatformData();
  interval_ = interval;
  active_ = false;
}


ProfileSampler::~ProfileSampler() {
  delete data_;
}

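// Starting the sampler installs the SIGPROF handler above and programs
// ITIMER_PROF, so the kernel delivers SIGPROF each time the process has
// consumed roughly interval_ milliseconds of CPU time (user plus
// system). Stop() restores the previous signal handler and timer.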
void ProfileSampler::Start() {
  // There can only be one active sampler at a time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;

  // Set the itimer to generate a tick for each interval.
  itimerval itimer;
  itimer.it_interval.tv_sec = interval_ / 1000;
  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);

  // Set this sampler as the active sampler.
  active_sampler_ = this;
  active_ = true;
}


void ProfileSampler::Stop() {
  // Restore the old signal handler and timer.
  if (data_->signal_handler_installed_) {
    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
  active_ = false;
}

#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal