Version 3.27.3 (based on bleeding_edge revision r21290)
Performance and stability improvements on all platforms.
git-svn-id: https://v8.googlecode.com/svn/trunk@21291 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index d8cefba..b5f7432 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2014-05-14: Version 3.27.3
+
+ Performance and stability improvements on all platforms.
+
+
2014-05-13: Version 3.27.2
Harden %SetIsObserved with RUNTIME_ASSERTs (Chromium issue 371782).
diff --git a/src/arm64/cpu-arm64.cc b/src/arm64/cpu-arm64.cc
index 0ed7e9f..a96a7b1 100644
--- a/src/arm64/cpu-arm64.cc
+++ b/src/arm64/cpu-arm64.cc
@@ -39,8 +39,9 @@
private:
uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
- // The cache type register holds the size of the caches as a power of two.
- return 1 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
+ // The cache type register holds the size of cache lines in words as a
+ // power of two.
+ return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
}
uint32_t cache_type_register_;
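
The corrected computation reflects the ARMv8 cache type register (CTR_EL0) encoding: the line-size fields give log2 of the line size measured in 4-byte words, not bytes, so the decoded byte size is 4 << field rather than 1 << field. A minimal standalone sketch of the decode, assuming the same field layout as the code above (the shift value 16 is illustrative):

    #include <cassert>
    #include <cstdint>

    // Decode a cache line size in bytes from a CTR_EL0-style register:
    // the 4-bit field at 'shift' holds log2 of the line size in words,
    // and a word is 4 bytes on ARM64.
    static uint32_t CacheLineSizeBytes(uint32_t cache_type_register, int shift) {
      uint32_t log2_words = (cache_type_register >> shift) & 0xf;
      return 4 << log2_words;
    }

    int main() {
      // A field value of 4 means 16 words, i.e. 64-byte cache lines.
      assert(CacheLineSizeBytes(4u << 16, 16) == 64);
      return 0;
    }
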
diff --git a/src/cpu.cc b/src/cpu.cc
index 3baae64..8d9afd8 100644
--- a/src/cpu.cc
+++ b/src/cpu.cc
@@ -206,6 +206,7 @@
size_t datalen_;
};
+#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
// Checks whether a space-separated list of items contains a given 'item'.
static bool HasListItem(const char* list, const char* item) {
@@ -231,6 +232,8 @@
return false;
}
+#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
+
#endif // V8_OS_LINUX
#endif // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
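
HasListItem is only used by the ARM and MIPS feature probes, which parse the space-separated "Features" line from /proc/cpuinfo, so the new guard keeps it from producing unused-function warnings on other hosts. A hedged sketch of the token-matching idea (not V8's exact implementation):

    #include <cstring>

    // Returns true if the space-separated 'list' contains the exact token 'item'.
    static bool HasListItem(const char* list, const char* item) {
      size_t item_len = strlen(item);
      while (list != NULL && *list != '\0') {
        while (*list == ' ') ++list;  // skip separators
        const char* end = strchr(list, ' ');
        size_t len = (end != NULL) ? (size_t)(end - list) : strlen(list);
        if (len == item_len && memcmp(list, item, item_len) == 0) return true;
        list = (end != NULL) ? end + 1 : NULL;
      }
      return false;
    }
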
diff --git a/src/d8.cc b/src/d8.cc
index b53ddf8..d19247a 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1116,29 +1116,6 @@
}
-#ifndef V8_SHARED
-static char* ReadToken(char* data, char token) {
- char* next = i::OS::StrChr(data, token);
- if (next != NULL) {
- *next = '\0';
- return (next + 1);
- }
-
- return NULL;
-}
-
-
-static char* ReadLine(char* data) {
- return ReadToken(data, '\n');
-}
-
-
-static char* ReadWord(char* data) {
- return ReadToken(data, ' ');
-}
-#endif // !V8_SHARED
-
-
// Reads a file into a v8 string.
Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
@@ -1172,71 +1149,6 @@
}
-#ifndef V8_SHARED
-class ShellThread : public i::Thread {
- public:
- // Takes ownership of the underlying char array of |files|.
- ShellThread(Isolate* isolate, char* files)
- : Thread("d8:ShellThread"),
- isolate_(isolate), files_(files) { }
-
- ~ShellThread() {
- delete[] files_;
- }
-
- virtual void Run();
- private:
- Isolate* isolate_;
- char* files_;
-};
-
-
-void ShellThread::Run() {
- char* ptr = files_;
- while ((ptr != NULL) && (*ptr != '\0')) {
- // For each newline-separated line.
- char* next_line = ReadLine(ptr);
-
- if (*ptr == '#') {
- // Skip comment lines.
- ptr = next_line;
- continue;
- }
-
- // Prepare the context for this thread.
- Locker locker(isolate_);
- HandleScope outer_scope(isolate_);
- Local<Context> thread_context =
- Shell::CreateEvaluationContext(isolate_);
- Context::Scope context_scope(thread_context);
- PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_));
-
- while ((ptr != NULL) && (*ptr != '\0')) {
- HandleScope inner_scope(isolate_);
- char* filename = ptr;
- ptr = ReadWord(ptr);
-
- // Skip empty strings.
- if (strlen(filename) == 0) {
- continue;
- }
-
- Handle<String> str = Shell::ReadFile(isolate_, filename);
- if (str.IsEmpty()) {
- printf("File '%s' not found\n", filename);
- Shell::Exit(1);
- }
-
- Shell::ExecuteString(
- isolate_, str, String::NewFromUtf8(isolate_, filename), false, false);
- }
-
- ptr = next_line;
- }
-}
-#endif // !V8_SHARED
-
-
SourceGroup::~SourceGroup() {
#ifndef V8_SHARED
delete thread_;
@@ -1360,8 +1272,6 @@
bool Shell::SetOptions(int argc, char* argv[]) {
bool logfile_per_isolate = false;
for (int i = 0; i < argc; i++) {
- // Turn '_' into '-'.
- // for (char* c = arg; *c != '\0'; c++) if (*c == '_') *c = '-';
if (strcmp(argv[i], "--stress-opt") == 0) {
options.stress_opt = true;
argv[i] = NULL;
@@ -1400,13 +1310,6 @@
return false;
#endif // V8_SHARED
options.num_isolates++;
- } else if (strcmp(argv[i], "-p") == 0) {
-#ifdef V8_SHARED
- printf("D8 with shared library does not support multi-threading\n");
- return false;
-#else
- options.num_parallel_files++;
-#endif // V8_SHARED
} else if (strcmp(argv[i], "--dump-heap-constants") == 0) {
#ifdef V8_SHARED
printf("D8 with shared library does not support constant dumping\n");
@@ -1433,30 +1336,6 @@
#endif // V8_SHARED
}
-#ifndef V8_SHARED
- // Run parallel threads if we are not using --isolate
- options.parallel_files = new char*[options.num_parallel_files];
- int parallel_files_set = 0;
- for (int i = 1; i < argc; i++) {
- if (argv[i] == NULL) continue;
- if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) {
- if (options.num_isolates > 1) {
- printf("-p is not compatible with --isolate\n");
- return false;
- }
- argv[i] = NULL;
- i++;
- options.parallel_files[parallel_files_set] = argv[i];
- parallel_files_set++;
- argv[i] = NULL;
- }
- }
- if (parallel_files_set != options.num_parallel_files) {
- printf("-p requires a file containing a list of files as parameter\n");
- return false;
- }
-#endif // !V8_SHARED
-
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
// Set up isolated source groups.
@@ -1485,23 +1364,6 @@
int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifndef V8_SHARED
- i::List<i::Thread*> threads(1);
- if (options.parallel_files != NULL) {
- for (int i = 0; i < options.num_parallel_files; i++) {
- char* files = NULL;
- { Locker lock(isolate);
- int size = 0;
- files = ReadChars(isolate, options.parallel_files[i], &size);
- }
- if (files == NULL) {
- printf("File list '%s' not found\n", options.parallel_files[i]);
- Exit(1);
- }
- ShellThread* thread = new ShellThread(isolate, files);
- thread->Start();
- threads.Add(thread);
- }
- }
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
@@ -1541,12 +1403,6 @@
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].WaitForThread();
}
-
- for (int i = 0; i < threads.length(); i++) {
- i::Thread* thread = threads[i];
- thread->Join();
- delete thread;
- }
#endif // !V8_SHARED
return 0;
}
diff --git a/src/d8.h b/src/d8.h
index bf29023..75d007f 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -195,10 +195,6 @@
class ShellOptions {
public:
ShellOptions() :
-#ifndef V8_SHARED
- num_parallel_files(0),
- parallel_files(NULL),
-#endif // !V8_SHARED
script_executed(false),
last_run(true),
send_idle_notification(false),
@@ -214,16 +210,9 @@
icu_data_file(NULL) { }
~ShellOptions() {
-#ifndef V8_SHARED
- delete[] parallel_files;
-#endif // !V8_SHARED
delete[] isolate_sources;
}
-#ifndef V8_SHARED
- int num_parallel_files;
- char** parallel_files;
-#endif // !V8_SHARED
bool script_executed;
bool last_run;
bool send_idle_notification;
diff --git a/src/execution.h b/src/execution.h
index 6d0b15f..563c496 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -212,8 +212,6 @@
void ClearInterrupt(int flagbit);
bool CheckAndClearInterrupt(InterruptFlag flag, const ExecutionAccess& lock);
- void InvokeApiInterruptCallback();
-
// You should hold the ExecutionAccess lock when calling this method.
bool has_pending_interrupts(const ExecutionAccess& lock) {
// Sanity check: We shouldn't be asking about pending interrupts
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 291f47d..31058ae 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -359,8 +359,6 @@
"enable use of SSE3 instructions if available")
DEFINE_bool(enable_sse4_1, true,
"enable use of SSE4.1 instructions if available")
-DEFINE_bool(enable_cmov, true,
- "enable use of CMOV instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
diff --git a/src/heap.cc b/src/heap.cc
index ddec9a9..f0c9154 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1681,36 +1681,24 @@
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- // We don't record weak slots during marking or scavenges.
- // Instead we do it once when we complete mark-compact cycle.
- // Note that write barrier has no effect if we are already in the middle of
- // compacting mark-sweep cycle and we have to record slots manually.
- bool record_slots =
- gc_state() == MARK_COMPACT &&
- mark_compact_collector()->is_compacting();
- ProcessArrayBuffers(retainer, record_slots);
- ProcessNativeContexts(retainer, record_slots);
+ ProcessArrayBuffers(retainer);
+ ProcessNativeContexts(retainer);
// TODO(mvstanton): AllocationSites only need to be processed during
// MARK_COMPACT, as they live in old space. Verify and address.
- ProcessAllocationSites(retainer, record_slots);
+ ProcessAllocationSites(retainer);
}
-void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
- bool record_slots) {
- Object* head =
- VisitWeakList<Context>(
- this, native_contexts_list(), retainer, record_slots);
+
+void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
+ Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
// Update the head of the list of contexts.
set_native_contexts_list(head);
}
-void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
- bool record_slots) {
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
Object* array_buffer_obj =
- VisitWeakList<JSArrayBuffer>(this,
- array_buffers_list(),
- retainer, record_slots);
+ VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
set_array_buffers_list(array_buffer_obj);
}
@@ -1726,12 +1714,9 @@
}
-void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
- bool record_slots) {
+void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
Object* allocation_site_obj =
- VisitWeakList<AllocationSite>(this,
- allocation_sites_list(),
- retainer, record_slots);
+ VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
set_allocation_sites_list(allocation_site_obj);
}
diff --git a/src/heap.h b/src/heap.h
index 0ea4529..8e3cd3f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1974,9 +1974,9 @@
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
- void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
- void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
- void ProcessAllocationSites(WeakObjectRetainer* retainer, bool record_slots);
+ void ProcessNativeContexts(WeakObjectRetainer* retainer);
+ void ProcessArrayBuffers(WeakObjectRetainer* retainer);
+ void ProcessAllocationSites(WeakObjectRetainer* retainer);
// Deoptimizes all code that contains allocation instructions which are tenured
// or not tenured. Moreover, it clears the pretenuring allocation site statistics.
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 58ccb1c..984de00 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -62,21 +62,6 @@
}
-int DoubleRegister::NumAllocatableRegisters() {
- return XMMRegister::kNumAllocatableRegisters;
-}
-
-
-int DoubleRegister::NumRegisters() {
- return XMMRegister::kNumRegisters;
-}
-
-
-const char* DoubleRegister::AllocationIndexToString(int index) {
- return XMMRegister::AllocationIndexToString(index);
-}
-
-
void CpuFeatures::Probe(bool serializer_enabled) {
ASSERT(!initialized_);
ASSERT(supported_ == 0);
@@ -98,10 +83,7 @@
}
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
-
- if (cpu.has_cmov()) {
- probed_features |= static_cast<uint64_t>(1) << CMOV;
- }
+ CHECK(cpu.has_cmov()); // CMOV support is mandatory.
// SAHF must be available in compat/legacy mode.
ASSERT(cpu.has_sahf());
@@ -636,7 +618,6 @@
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
- ASSERT(IsEnabled(CMOV));
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
EMIT(0x0F);
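
Treating CMOV as mandatory is safe here because V8 already requires SSE2 on ia32, and every CPU that reports SSE2 also reports CMOV; both are feature bits in EDX of CPUID leaf 1, with CMOV at bit 15 (the same value the deleted CpuFeature enum entry used) and SSE2 at bit 26. A small sketch of the probe using the GCC/Clang <cpuid.h> intrinsic, which is assumed here:

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned int eax, ebx, ecx, edx;
      if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
        int has_cmov = (edx >> 15) & 1;  // CPUID.01H:EDX bit 15
        int has_sse2 = (edx >> 26) & 1;  // CPUID.01H:EDX bit 26
        std::printf("cmov=%d sse2=%d\n", has_cmov, has_sse2);
      }
      return 0;
    }
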
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 74709fa..6b60c12 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -141,71 +141,41 @@
}
-struct DoubleRegister {
- static const int kMaxNumRegisters = 8;
+struct XMMRegister {
static const int kMaxNumAllocatableRegisters = 7;
- static int NumAllocatableRegisters();
- static int NumRegisters();
- static const char* AllocationIndexToString(int index);
+ static const int kMaxNumRegisters = 8;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
- static int ToAllocationIndex(DoubleRegister reg) {
+ static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
- static DoubleRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ static XMMRegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index + 1);
}
- static DoubleRegister from_code(int code) {
- DoubleRegister result = { code };
+ static XMMRegister from_code(int code) {
+ XMMRegister result = { code };
return result;
}
bool is_valid() const {
- return 0 <= code_ && code_ < NumRegisters();
+ return 0 <= code_ && code_ < kMaxNumRegisters;
}
+
int code() const {
ASSERT(is_valid());
return code_;
}
- int code_;
-};
-
-
-const DoubleRegister double_register_0 = { 0 };
-const DoubleRegister double_register_1 = { 1 };
-const DoubleRegister double_register_2 = { 2 };
-const DoubleRegister double_register_3 = { 3 };
-const DoubleRegister double_register_4 = { 4 };
-const DoubleRegister double_register_5 = { 5 };
-const DoubleRegister double_register_6 = { 6 };
-const DoubleRegister double_register_7 = { 7 };
-const DoubleRegister no_double_reg = { -1 };
-
-
-struct XMMRegister : DoubleRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
-
- static XMMRegister from_code(int code) {
- STATIC_ASSERT(sizeof(XMMRegister) == sizeof(DoubleRegister));
- XMMRegister result;
- result.code_ = code;
- return result;
- }
-
bool is(XMMRegister reg) const { return code_ == reg.code_; }
- static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- return from_code(index + 1);
- }
-
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",
@@ -217,18 +187,23 @@
};
return names[index];
}
+
+ int code_;
};
-#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
-#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
-#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
-#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
-#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
-#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
-#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
-#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
-#define no_xmm_reg (static_cast<const XMMRegister&>(no_double_reg))
+typedef XMMRegister DoubleRegister;
+
+
+const XMMRegister xmm0 = { 0 };
+const XMMRegister xmm1 = { 1 };
+const XMMRegister xmm2 = { 2 };
+const XMMRegister xmm3 = { 3 };
+const XMMRegister xmm4 = { 4 };
+const XMMRegister xmm5 = { 5 };
+const XMMRegister xmm6 = { 6 };
+const XMMRegister xmm7 = { 7 };
+const XMMRegister no_xmm_reg = { -1 };
enum Condition {
@@ -481,9 +456,9 @@
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
-// if (assembler->IsSupported(CMOV)) {
-// CpuFeatureScope fscope(assembler, CMOV);
-// // Generate code containing cmov.
+// if (assembler->IsSupported(SSE3)) {
+// CpuFeatureScope fscope(assembler, SSE3);
+// // Generate code containing SSE3 instructions.
// } else {
// // Generate alternative code.
// }
@@ -499,7 +474,6 @@
if (Check(f, cross_compile_)) return true;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
return Check(f, supported_);
}
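
The merged XMMRegister struct keeps the quirk that xmm0 is reserved as a scratch register: allocation indices cover xmm1..xmm7 only, so index i maps to register code i + 1, which is why AllocationIndexToString starts at "xmm1" and kMaxNumAllocatableRegisters is 7 while kMaxNumRegisters is 8. A tiny sketch of the mapping, assuming just those two constants:

    #include <cassert>

    const int kMaxNumRegisters = 8;             // xmm0..xmm7
    const int kMaxNumAllocatableRegisters = 7;  // xmm0 is reserved as scratch

    static int ToAllocationIndex(int code) {
      assert(code != 0);  // xmm0 is never allocated
      return code - 1;
    }

    static int FromAllocationIndex(int index) {
      assert(index >= 0 && index < kMaxNumAllocatableRegisters);
      return index + 1;   // register code
    }

    int main() {
      for (int i = 0; i < kMaxNumAllocatableRegisters; ++i)
        assert(ToAllocationIndex(FromAllocationIndex(i)) == i);
      return 0;
    }
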
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 03c4361..6fcc1a6 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -480,8 +480,8 @@
// restore them.
__ pushad();
if (save_doubles_ == kSaveFPRegs) {
- __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ __ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movsd(Operand(esp, i * kDoubleSize), reg);
}
@@ -496,11 +496,11 @@
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movsd(reg, Operand(esp, i * kDoubleSize));
}
- __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ __ add(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
__ popad();
__ ret(0);
@@ -629,15 +629,7 @@
__ shrd(result_reg, scratch1);
__ shr_cl(result_reg);
__ test(ecx, Immediate(32));
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope use_cmov(masm, CMOV);
- __ cmov(not_equal, scratch1, result_reg);
- } else {
- Label skip_mov;
- __ j(equal, &skip_mov, Label::kNear);
- __ mov(scratch1, result_reg);
- __ bind(&skip_mov);
- }
+ __ cmov(not_equal, scratch1, result_reg);
}
// If the double was negative, negate the integer result.
@@ -649,15 +641,7 @@
} else {
__ cmp(exponent_operand, Immediate(0));
}
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope use_cmov(masm, CMOV);
__ cmov(greater, result_reg, scratch1);
- } else {
- Label skip_mov;
- __ j(less_equal, &skip_mov, Label::kNear);
- __ mov(result_reg, scratch1);
- __ bind(&skip_mov);
- }
// Restore registers
__ bind(&done);
@@ -2068,32 +2052,12 @@
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, Label::kNear);
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope use_cmov(masm, CMOV);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
-
- __ Move(eax, Immediate(0));
- __ ret(0);
-
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
-
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
// If one of the numbers was NaN, then the result is always false.
// The cc is never not-equal.
@@ -3776,63 +3740,46 @@
__ JumpIfNotSmi(eax, &miss);
}
- // Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SSE2 or CMOV is unsupported.
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope scope2(masm, CMOV);
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(eax, &right_smi, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
+ __ SmiUntag(ecx);
+ __ Cvtsi2sd(xmm1, ecx);
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(eax, &right_smi, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
- __ SmiUntag(ecx);
- __ Cvtsi2sd(xmm1, ecx);
+ __ bind(&left);
+ __ JumpIfSmi(edx, &left_smi, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
+ __ SmiUntag(ecx);
+ __ Cvtsi2sd(xmm0, ecx);
- __ bind(&left);
- __ JumpIfSmi(edx, &left_smi, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
- __ SmiUntag(ecx);
- __ Cvtsi2sd(xmm0, ecx);
+ __ bind(&done);
+ // Compare operands.
+ __ ucomisd(xmm0, xmm1);
- __ bind(&done);
- // Compare operands.
- __ ucomisd(xmm0, xmm1);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
-
- // Return a result of -1, 0, or 1, based on EFLAGS.
- // Performing mov, because xor would destroy the flag register.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
- }
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ // Performing mov, because xor would destroy the flag register.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
__ bind(&unordered);
__ bind(&generic_stub);
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 588248f..4cefec0 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -336,9 +336,9 @@
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
if (mode == kSaveFPRegs) {
masm->sub(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+ Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
// Save all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+ for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
@@ -349,12 +349,12 @@
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
// Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+ for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+ Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
}
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index c061db1..05b2ce8 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -187,7 +187,7 @@
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -209,7 +209,7 @@
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
@@ -239,9 +239,9 @@
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
+ XMMRegister::kMaxNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movsd(Operand(esp, offset), xmm_reg);
@@ -290,7 +290,7 @@
int double_regs_offset = FrameDescription::double_registers_offset();
// Fill in the double input registers.
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
__ movsd(xmm0, Operand(esp, src_offset));
@@ -373,7 +373,7 @@
__ j(below, &outer_push_loop);
// In case of a failed STUB, we have to restore the XMM registers.
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::kMaxNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(ebx, src_offset));
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 8129281..c4b26cd 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -827,16 +827,8 @@
void MacroAssembler::FCmp() {
- if (CpuFeatures::IsSupported(CMOV)) {
- fucomip();
- fstp(0);
- } else {
- fucompp();
- push(eax);
- fnstsw_ax();
- sahf();
- pop(eax);
- }
+ fucomip();
+ fstp(0);
}
@@ -979,10 +971,11 @@
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
+ int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
+ argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
@@ -1025,7 +1018,7 @@
// Optionally restore all XMM registers.
if (save_doubles) {
const int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 0cf6a9e..b726a9b 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -222,9 +222,13 @@
static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
Context* context = Context::cast(object);
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
+ // We will mark the cache black with a separate pass when we finish marking.
+ // Note that GC can happen when the context is not fully initialized,
+ // so the cache can be undefined.
+ Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+ if (!cache->IsUndefined()) {
+ MarkObjectGreyDoNotEnqueue(cache);
+ }
VisitNativeContext(map, context);
}
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 3801396..ff6d2e3 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2757,7 +2757,7 @@
ASSERT(start + 1 == end);
Object* old_head = entries->object_at(start);
MarkCompactWeakObjectRetainer retainer;
- Object* head = VisitWeakList<Code>(heap(), old_head, &retainer, true);
+ Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
entries->set_object_at(new_start, head);
Object** slot = entries->slot_at(new_start);
RecordSlot(slot, slot, head);
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 24cff34..aea8a09 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -191,6 +191,16 @@
}
+// We don't record weak slots during marking or scavenges. Instead we do it
+// once when we complete the mark-compact cycle. Note that the write barrier
+// has no effect if we are already in the middle of a compacting mark-sweep
+// cycle and we have to record slots manually.
+static bool MustRecordSlots(Heap* heap) {
+ return heap->gc_state() == Heap::MARK_COMPACT &&
+ heap->mark_compact_collector()->is_compacting();
+}
+
+
template <class T>
struct WeakListVisitor;
@@ -198,12 +208,12 @@
template <class T>
Object* VisitWeakList(Heap* heap,
Object* list,
- WeakObjectRetainer* retainer,
- bool record_slots) {
+ WeakObjectRetainer* retainer) {
Object* undefined = heap->undefined_value();
Object* head = undefined;
T* tail = NULL;
MarkCompactCollector* collector = heap->mark_compact_collector();
+ bool record_slots = MustRecordSlots(heap);
while (list != undefined) {
// Check whether to keep the candidate in the list.
T* candidate = reinterpret_cast<T*>(list);
@@ -229,8 +239,7 @@
// tail is a live object, visit it.
- WeakListVisitor<T>::VisitLiveObject(
- heap, tail, retainer, record_slots);
+ WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
} else {
WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
}
@@ -273,12 +282,9 @@
return JSFunction::kNextFunctionLinkOffset;
}
- static void VisitLiveObject(Heap*, JSFunction*,
- WeakObjectRetainer*, bool) {
- }
+ static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {}
- static void VisitPhantomObject(Heap*, JSFunction*) {
- }
+ static void VisitPhantomObject(Heap*, JSFunction*) {}
};
@@ -296,12 +302,9 @@
return Code::kNextCodeLinkOffset;
}
- static void VisitLiveObject(Heap*, Code*,
- WeakObjectRetainer*, bool) {
- }
+ static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
- static void VisitPhantomObject(Heap*, Code*) {
- }
+ static void VisitPhantomObject(Heap*, Code*) {}
};
@@ -317,33 +320,32 @@
return context->get(Context::NEXT_CONTEXT_LINK);
}
+ static int WeakNextOffset() {
+ return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
+ }
+
static void VisitLiveObject(Heap* heap,
Context* context,
- WeakObjectRetainer* retainer,
- bool record_slots) {
+ WeakObjectRetainer* retainer) {
// Process the three weak lists linked off the context.
- DoWeakList<JSFunction>(heap, context, retainer, record_slots,
+ DoWeakList<JSFunction>(heap, context, retainer,
Context::OPTIMIZED_FUNCTIONS_LIST);
- DoWeakList<Code>(heap, context, retainer, record_slots,
- Context::OPTIMIZED_CODE_LIST);
- DoWeakList<Code>(heap, context, retainer, record_slots,
- Context::DEOPTIMIZED_CODE_LIST);
+ DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
+ DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
}
template<class T>
static void DoWeakList(Heap* heap,
Context* context,
WeakObjectRetainer* retainer,
- bool record_slots,
int index) {
// Visit the weak list, removing dead intermediate elements.
- Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
- record_slots);
+ Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
// Update the list head.
context->set(index, list_head, UPDATE_WRITE_BARRIER);
- if (record_slots) {
+ if (MustRecordSlots(heap)) {
// Record the updated slot if necessary.
Object** head_slot = HeapObject::RawField(
context, FixedArray::SizeFor(index));
@@ -358,10 +360,6 @@
ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
}
-
- static int WeakNextOffset() {
- return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
- }
};
@@ -375,16 +373,13 @@
return obj->weak_next();
}
- static void VisitLiveObject(Heap*,
- JSArrayBufferView* obj,
- WeakObjectRetainer* retainer,
- bool record_slots) {}
-
- static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
-
static int WeakNextOffset() {
return JSArrayBufferView::kWeakNextOffset;
}
+
+ static void VisitLiveObject(Heap*, JSArrayBufferView*, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
};
@@ -398,17 +393,20 @@
return obj->weak_next();
}
+ static int WeakNextOffset() {
+ return JSArrayBuffer::kWeakNextOffset;
+ }
+
static void VisitLiveObject(Heap* heap,
JSArrayBuffer* array_buffer,
- WeakObjectRetainer* retainer,
- bool record_slots) {
+ WeakObjectRetainer* retainer) {
Object* typed_array_obj =
VisitWeakList<JSArrayBufferView>(
heap,
array_buffer->weak_first_view(),
- retainer, record_slots);
+ retainer);
array_buffer->set_weak_first_view(typed_array_obj);
- if (typed_array_obj != heap->undefined_value() && record_slots) {
+ if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) {
Object** slot = HeapObject::RawField(
array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
@@ -418,10 +416,6 @@
static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
Runtime::FreeArrayBuffer(heap->isolate(), phantom);
}
-
- static int WeakNextOffset() {
- return JSArrayBuffer::kWeakNextOffset;
- }
};
@@ -435,36 +429,33 @@
return obj->weak_next();
}
- static void VisitLiveObject(Heap* heap,
- AllocationSite* site,
- WeakObjectRetainer* retainer,
- bool record_slots) {}
-
- static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
-
static int WeakNextOffset() {
return AllocationSite::kWeakNextOffset;
}
+
+ static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {}
+
+ static void VisitPhantomObject(Heap*, AllocationSite*) {}
};
template Object* VisitWeakList<Code>(
- Heap* heap, Object* list, WeakObjectRetainer* retainer, bool record_slots);
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
template Object* VisitWeakList<JSFunction>(
- Heap* heap, Object* list, WeakObjectRetainer* retainer, bool record_slots);
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
template Object* VisitWeakList<Context>(
- Heap* heap, Object* list, WeakObjectRetainer* retainer, bool record_slots);
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
template Object* VisitWeakList<JSArrayBuffer>(
- Heap* heap, Object* list, WeakObjectRetainer* retainer, bool record_slots);
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
template Object* VisitWeakList<AllocationSite>(
- Heap* heap, Object* list, WeakObjectRetainer* retainer, bool record_slots);
+ Heap* heap, Object* list, WeakObjectRetainer* retainer);
} } // namespace v8::internal
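
The net effect of the objects-visiting.cc changes: VisitWeakList no longer takes a record_slots flag threaded through every caller; it derives the same answer locally via MustRecordSlots(heap), and the per-type WeakListVisitor specializations lose the now-unused bool parameter. A simplified, self-contained sketch of the traits-based list walk (stand-in types, not V8's real Heap or retainer machinery):

    #include <cstddef>

    // Each weak-listed type supplies its link accessors via a traits struct.
    struct Node {
      Node* weak_next;
      bool alive;
    };

    template <class T>
    struct WeakListVisitor {
      static T* WeakNext(T* n) { return n->weak_next; }
      static void SetWeakNext(T* n, T* next) { n->weak_next = next; }
    };

    // Walk the intrusive list, unlinking dead elements and returning the new
    // head; the real version also visits survivors and records slots when a
    // compacting mark-sweep cycle is in progress.
    template <class T>
    T* VisitWeakList(T* list) {
      T* head = NULL;
      T* tail = NULL;
      while (list != NULL) {
        T* candidate = list;
        list = WeakListVisitor<T>::WeakNext(candidate);
        if (!candidate->alive) continue;  // drop dead elements
        if (tail == NULL) {
          head = candidate;  // first survivor becomes the new head
        } else {
          WeakListVisitor<T>::SetWeakNext(tail, candidate);
        }
        tail = candidate;
      }
      if (tail != NULL) WeakListVisitor<T>::SetWeakNext(tail, NULL);
      return head;
    }

    int main() {
      Node a = { NULL, true }, b = { NULL, false }, c = { NULL, true };
      a.weak_next = &b; b.weak_next = &c;
      Node* head = VisitWeakList<Node>(&a);
      return (head == &a && a.weak_next == &c && c.weak_next == NULL) ? 0 : 1;
    }
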
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 05f8257..d9ab02a 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -469,10 +469,7 @@
// pointers. The template parameter T is a WeakListVisitor that defines how to
// access the next-element pointers.
template <class T>
-Object* VisitWeakList(Heap* heap,
- Object* list,
- WeakObjectRetainer* retainer,
- bool record_slots);
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
} } // namespace v8::internal
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 1fb6da7..1370ded 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -54,14 +54,7 @@
uint64_t OS::CpuFeaturesImpliedByPlatform() {
-#if V8_OS_MACOSX
- // Mac OS X requires CMOV to install so we can assume it is present.
- // These constants are defined by the CPUid instructions.
- const uint64_t one = 1;
- return one << CMOV;
-#else
- return 0; // Nothing special about the other systems.
-#endif
+ return 0; // Nothing special.
}
diff --git a/src/platform.h b/src/platform.h
index 492e5e4..e2e4cb9 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -268,8 +268,7 @@
static void SignalCodeMovingGC();
// The return value indicates the CPU features we are sure of because of the
- // OS. For example MacOSX doesn't run on any x86 CPUs that don't have CMOV
- // instructions.
+ // OS.
// This is a little messy because the interpretation depends on the combination
// of the CPU and the OS. The bits in the answer correspond to the bit
// positions indicated by the members of the CpuFeature enum from globals.h
diff --git a/src/v8globals.h b/src/v8globals.h
index f2ab24a..3f4d831 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -403,7 +403,6 @@
// On X86/X64, values below 32 are bits in EDX, values 32 and above are bits in ECX.
enum CpuFeature { SSE4_1 = 32 + 19, // x86
SSE3 = 32 + 0, // x86
- CMOV = 15, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
SUDIV = 3, // ARM
diff --git a/src/version.cc b/src/version.cc
index ccf7158..e2885c1 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 27
-#define BUILD_NUMBER 2
+#define BUILD_NUMBER 3
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index bdb39a5..27d6cc3 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -19,7 +19,7 @@
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
-uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
+uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0;
uint64_t CpuFeatures::cross_compile_ = 0;
@@ -30,11 +30,11 @@
void CpuFeatures::Probe(bool serializer_enabled) {
- ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
+ ASSERT(supported_ == 0);
#ifdef DEBUG
initialized_ = true;
#endif
- supported_ = kDefaultCpuFeatures;
+ supported_ = 0;
if (serializer_enabled) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
@@ -54,7 +54,6 @@
// CMOV must be available on every x64 CPU.
ASSERT(cpu.has_cmov());
- probed_features |= static_cast<uint64_t>(1) << CMOV;
// SAHF is not generally available in long mode.
if (cpu.has_sahf()) {
@@ -63,8 +62,7 @@
uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform();
supported_ = probed_features | platform_features;
- found_by_runtime_probing_only_
- = probed_features & ~kDefaultCpuFeatures & ~platform_features;
+ found_by_runtime_probing_only_ = probed_features & ~platform_features;
}
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 552c4f8..9d421ef 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -458,7 +458,6 @@
ASSERT(initialized_);
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
- if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
return Check(f, supported_);
}
@@ -491,12 +490,6 @@
return static_cast<uint64_t>(1) << f;
}
- // Safe defaults include CMOV for X64. It is always available, if
- // anyone checks, but they shouldn't need to check.
- // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
- // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << CMOV);
-
#ifdef DEBUG
static bool initialized_;
#endif
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index de2bbdf..b369e839 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -414,25 +414,22 @@
// cmov.
{
- if (CpuFeatures::IsSupported(CMOV)) {
- CpuFeatureScope use_cmov(&assm, CMOV);
- __ cmov(overflow, eax, Operand(eax, 0));
- __ cmov(no_overflow, eax, Operand(eax, 1));
- __ cmov(below, eax, Operand(eax, 2));
- __ cmov(above_equal, eax, Operand(eax, 3));
- __ cmov(equal, eax, Operand(ebx, 0));
- __ cmov(not_equal, eax, Operand(ebx, 1));
- __ cmov(below_equal, eax, Operand(ebx, 2));
- __ cmov(above, eax, Operand(ebx, 3));
- __ cmov(sign, eax, Operand(ecx, 0));
- __ cmov(not_sign, eax, Operand(ecx, 1));
- __ cmov(parity_even, eax, Operand(ecx, 2));
- __ cmov(parity_odd, eax, Operand(ecx, 3));
- __ cmov(less, eax, Operand(edx, 0));
- __ cmov(greater_equal, eax, Operand(edx, 1));
- __ cmov(less_equal, eax, Operand(edx, 2));
- __ cmov(greater, eax, Operand(edx, 3));
- }
+ __ cmov(overflow, eax, Operand(eax, 0));
+ __ cmov(no_overflow, eax, Operand(eax, 1));
+ __ cmov(below, eax, Operand(eax, 2));
+ __ cmov(above_equal, eax, Operand(eax, 3));
+ __ cmov(equal, eax, Operand(ebx, 0));
+ __ cmov(not_equal, eax, Operand(ebx, 1));
+ __ cmov(below_equal, eax, Operand(ebx, 2));
+ __ cmov(above, eax, Operand(ebx, 3));
+ __ cmov(sign, eax, Operand(ecx, 0));
+ __ cmov(not_sign, eax, Operand(ecx, 1));
+ __ cmov(parity_even, eax, Operand(ecx, 2));
+ __ cmov(parity_odd, eax, Operand(ecx, 3));
+ __ cmov(less, eax, Operand(edx, 0));
+ __ cmov(greater_equal, eax, Operand(edx, 1));
+ __ cmov(less_equal, eax, Operand(edx, 2));
+ __ cmov(greater, eax, Operand(edx, 3));
}
{
diff --git a/test/mjsunit/array-natives-elements.js b/test/mjsunit/array-natives-elements.js
index cf848bb..f64818d 100644
--- a/test/mjsunit/array-natives-elements.js
+++ b/test/mjsunit/array-natives-elements.js
@@ -54,29 +54,30 @@
assertTrue(%HasFastDoubleElements([1.1]));
assertTrue(%HasFastDoubleElements([1.1,2]));
- // Push
- var a0 = [1, 2, 3];
- if (%HasFastSmiElements(a0)) {
- assertTrue(%HasFastSmiElements(a0));
- a0.push(4);
- assertTrue(%HasFastSmiElements(a0));
- a0.push(1.3);
- assertTrue(%HasFastDoubleElements(a0));
- a0.push(1.5);
- assertTrue(%HasFastDoubleElements(a0));
- a0.push({});
- assertTrue(%HasFastObjectElements(a0));
- a0.push({});
- assertTrue(%HasFastObjectElements(a0));
- } else {
- assertTrue(%HasFastObjectElements(a0));
- a0.push(4);
- a0.push(1.3);
- a0.push(1.5);
- a0.push({});
- a0.push({});
- assertTrue(%HasFastObjectElements(a0));
+ // This code exists to eliminate the learning influence of AllocationSites
+ // on the following tests.
+ var __sequence = 0;
+ function make_array_string(literal) {
+ this.__sequence = this.__sequence + 1;
+ return "/* " + this.__sequence + " */ " + literal;
}
+ function make_array(literal) {
+ return eval(make_array_string(literal));
+ }
+
+ // Push
+ var a0 = make_array("[1, 2, 3]");
+ assertTrue(%HasFastSmiElements(a0));
+ a0.push(4);
+ assertTrue(%HasFastSmiElements(a0));
+ a0.push(1.3);
+ assertTrue(%HasFastDoubleElements(a0));
+ a0.push(1.5);
+ assertTrue(%HasFastDoubleElements(a0));
+ a0.push({});
+ assertTrue(%HasFastObjectElements(a0));
+ a0.push({});
+ assertTrue(%HasFastObjectElements(a0));
assertEquals([1,2,3,4,1.3,1.5,{},{}], a0);
// Concat