New version of v8 from bleeding edge at revision 3649
diff --git a/AUTHORS b/AUTHORS
index 4fd7aa5..af0ecde 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -13,10 +13,11 @@
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
+John Jozwiak <jjozwiak@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
-John Jozwiak <jjozwiak@codeaurora.org>
+Subrato K De <subratokde@codeaurora.org>
diff --git a/ChangeLog b/ChangeLog
index 825431c..192dd25 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,51 @@
+2010-01-14: Version 2.0.6
+
+ Added ES5 Object.getPrototypeOf, GetOwnPropertyDescriptor,
+ GetOwnProperty, FromPropertyDescriptor.
+
+ Fixed Mac x64 build errors.
+
+ Improved performance of some math and string operations.
+
+ Improved performance of some regexp operations.
+
+ Improved performance of context creation.
+
+ Improved performance of hash tables.
+
+
+2009-12-18: Version 2.0.5
+
+        Extended the upper limit of map space to allow for 7 times as many
+        maps to be allocated (issue 524).
+
+ Improved performance of code using closures.
+
+ Improved performance of some binary operations involving doubles.
+
+
+2009-12-16: Version 2.0.4
+
+ Added ECMAScript 5 Object.create.
+
+ Improved performance of Math.max and Math.min.
+
+ Optimized adding of strings on 64-bit platforms.
+
+ Improved handling of external strings by using a separate table
+ instead of weak handles. This improves garbage collection
+ performance and uses less memory.
+
+ Changed code generation for object and array literals in toplevel
+ code to be more compact by doing more work in the runtime.
+
+ Fixed a crash bug triggered when garbage collection happened during
+ generation of a callback load inline cache stub.
+
+ Fixed crash bug sometimes triggered when local variables shadowed
+ parameters in functions that used the arguments object.
+
+
2009-12-03: Version 2.0.3
Optimized handling and adding of strings, for-in and Array.join.
@@ -35,7 +83,7 @@
Reverted a change which caused Chromium interactive ui test
failures.
-
+
2009-11-18: Version 2.0.0
Added support for VFP on ARM.
@@ -80,7 +128,7 @@
2009-10-16: Version 1.3.16
-
+
X64: Convert smis to holding 32 bits of payload.
Introduce v8::Integer::NewFromUnsigned method.
@@ -225,7 +273,7 @@
notifications when V8 has not yet been initialized.
Fixed ARM simulator compilation problem on Windows.
-
+
2009-08-25: Version 1.3.7
@@ -340,9 +388,9 @@
function is a built-in.
Initial implementation of constructor heap profile for JS objects.
-
+
More fine grained control of profiling aspects through the API.
-
+
Optimized the called as constructor check for API calls.
@@ -367,8 +415,8 @@
Added an external allocation limit to avoid issues where small V8
objects would hold on to large amounts of external memory without
causing garbage collections.
-
- Finished more of the inline caching stubs for x64 targets.
+
+ Finished more of the inline caching stubs for x64 targets.
2009-07-13: Version 1.2.14
@@ -448,9 +496,9 @@
Fixed a bug in the string type inference.
Fixed a bug in the handling of 'constant function' properties.
-
+
Improved overall performance.
-
+
2009-06-16: Version 1.2.8
diff --git a/SConstruct b/SConstruct
index 2087a94..739e344 100755
--- a/SConstruct
+++ b/SConstruct
@@ -143,6 +143,9 @@
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
+ 'library:shared': {
+ 'CPPDEFINES': ['V8_SHARED']
+ }
},
'os:freebsd': {
'CPPPATH' : ['/usr/local/include'],
@@ -178,6 +181,12 @@
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
+ 'armvariant:thumb2': {
+ 'CPPDEFINES': ['V8_ARM_VARIANT_THUMB']
+ },
+ 'armvariant:arm': {
+ 'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
+ },
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'],
@@ -656,6 +665,11 @@
'values': ['default', 'hidden'],
'default': 'hidden',
'help': 'shared library symbol visibility'
+ },
+ 'armvariant': {
+ 'values': ['arm', 'thumb2', 'none'],
+ 'default': 'none',
+    'help': 'generate arm (the default for arch:arm) or thumb2 instructions'
}
}
@@ -663,7 +677,7 @@
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
- result.Add('sample', 'build sample (shell, process)', '')
+ result.Add('sample', 'build sample (shell, process, lineprocessor)', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
for (name, option) in SIMPLE_OPTIONS.iteritems():
@@ -731,7 +745,7 @@
def VerifyOptions(env):
if not IsLegal(env, 'mode', ['debug', 'release']):
return False
- if not IsLegal(env, 'sample', ["shell", "process"]):
+ if not IsLegal(env, 'sample', ["shell", "process", "lineprocessor"]):
return False
if not IsLegal(env, 'regexp', ["native", "interpreted"]):
return False
@@ -839,6 +853,10 @@
# Print a warning if profiling is enabled without profiling support
print "Warning: forcing profilingsupport on when prof is on"
options['profilingsupport'] = 'on'
+ if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
+ options['armvariant'] = 'arm'
+ if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
+ options['armvariant'] = 'none'
def ParseEnvOverrides(arg, imports):
@@ -931,6 +949,7 @@
d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
+ context.ApplyEnvOverrides(d8_env)
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index be360d8..3f20451 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
We sync with Chromium release revision, which has both webkit revision and V8 revision.
http://src.chromium.org/svn/releases/4.0.269.0/DEPS
-http://v8.googlecode.com/svn/branches/bleeding_edge@3431
+http://v8.googlecode.com/svn/branches/bleeding_edge@3649
diff --git a/include/v8-debug.h b/include/v8-debug.h
index b27bacc..2e5fb3f 100644
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -224,9 +224,11 @@
* be processed. Note that debug messages will only be processed if there is
* a V8 break. This can happen automatically by using the option
* --debugger-auto-break.
+ * \param provide_locker if true, V8 acquires a v8::Locker for you before
+ * calling the handler
*/
static void SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler);
+ DebugMessageDispatchHandler handler, bool provide_locker = false);
/**
* Run a JavaScript function in the debugger.
@@ -258,8 +260,48 @@
* supplied TCP/IP port for remote debugger connection.
* \param name the name of the embedding application
* \param port the TCP/IP port to listen on
+ * \param wait_for_connection whether V8 should pause on the first statement,
+ * allowing a remote debugger to connect before anything interesting happens
*/
- static bool EnableAgent(const char* name, int port);
+ static bool EnableAgent(const char* name, int port,
+ bool wait_for_connection = false);
+
+ /**
+ * Makes V8 process all pending debug messages.
+ *
+ * From V8's point of view all debug messages arrive asynchronously (e.g. from
+ * a remote debugger), but they must all be handled synchronously: V8 cannot
+ * do two things at once, so normal script execution must be interrupted
+ * for a while.
+ *
+ * Generally, when a message arrives V8 may be in one of three states:
+ * 1. V8 is running a script; V8 will automatically interrupt it and process
+ * all pending messages (provided the auto_break flag is enabled);
+ * 2. V8 is suspended on a debug breakpoint; in this state V8 is dedicated
+ * to reading and processing debug messages;
+ * 3. V8 is not running at all, or has called some long-running C++ function;
+ * by default this means that processing of all debug messages is deferred
+ * until V8 gets control again; however, the embedding application can improve
+ * on this by calling this method manually.
+ *
+ * It makes sense to call this method whenever a new debug message has
+ * arrived and V8 is not already running. The method
+ * v8::Debug::SetDebugMessageDispatchHandler should help with the former
+ * condition.
+ *
+ * Technically, this method is in many senses equivalent to executing an
+ * empty script:
+ * 1. It does nothing except process all pending debug messages.
+ * 2. It should be invoked with the same precautions, and from the same
+ * context, as a V8 script would be invoked from, because:
+ * a. via the "evaluate" command it can do whatever a normal script can do,
+ * including all native calls;
+ * b. no other thread should call into V8 while this method is running
+ * (v8::Locker may be used here).
+ *
+ * The behavior of the "evaluate" debug command in the scope of this method
+ * is currently not specified.
+ */
+ static void ProcessDebugMessages();
};
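Taken together, the additions above let an embedder keep the debugger responsive even while its own code blocks outside V8. A minimal sketch of the intended wiring, assuming an application-owned persistent context (all embedder-side names here are illustrative):

v8::Persistent<v8::Context> app_context;  // created elsewhere by the embedder

// Runs on the agent's thread; the v8::Locker is already held because the
// handler below is registered with provide_locker == true.
static void OnDebugMessage() {
  v8::Context::Scope scope(app_context);  // pick a context for "evaluate"
  v8::Debug::ProcessDebugMessages();      // drain all pending debug messages
}

// During startup:
//   v8::Debug::EnableAgent("my-app", 5858, true /* wait_for_connection */);
//   v8::Debug::SetDebugMessageDispatchHandler(OnDebugMessage, true);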
diff --git a/include/v8.h b/include/v8.h
index a8ee8d4..6125286 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -503,6 +503,7 @@
virtual int Length() = 0;
virtual unsigned* Data() = 0;
+ virtual bool HasError() = 0;
};
@@ -833,13 +834,26 @@
* Returns true if the string is both external and ascii
*/
bool IsExternalAscii() const;
+
+ class V8EXPORT ExternalStringResourceBase {
+ public:
+ virtual ~ExternalStringResourceBase() {}
+ protected:
+ ExternalStringResourceBase() {}
+ private:
+ // Disallow copying and assigning.
+ ExternalStringResourceBase(const ExternalStringResourceBase&);
+ void operator=(const ExternalStringResourceBase&);
+ };
+
/**
* An ExternalStringResource is a wrapper around a two-byte string
* buffer that resides outside V8's heap. Implement an
* ExternalStringResource to manage the life cycle of the underlying
* buffer. Note that the string data must be immutable.
*/
- class V8EXPORT ExternalStringResource { // NOLINT
+ class V8EXPORT ExternalStringResource
+ : public ExternalStringResourceBase {
public:
/**
* Override the destructor to manage the life cycle of the underlying
@@ -852,10 +866,6 @@
virtual size_t length() const = 0;
protected:
ExternalStringResource() {}
- private:
- // Disallow copying and assigning.
- ExternalStringResource(const ExternalStringResource&);
- void operator=(const ExternalStringResource&);
};
/**
@@ -869,7 +879,8 @@
* Use String::New or convert to 16 bit data for non-ASCII.
*/
- class V8EXPORT ExternalAsciiStringResource { // NOLINT
+ class V8EXPORT ExternalAsciiStringResource
+ : public ExternalStringResourceBase {
public:
/**
* Override the destructor to manage the life cycle of the underlying
@@ -882,10 +893,6 @@
virtual size_t length() const = 0;
protected:
ExternalAsciiStringResource() {}
- private:
- // Disallow copying and assigning.
- ExternalAsciiStringResource(const ExternalAsciiStringResource&);
- void operator=(const ExternalAsciiStringResource&);
};
/**
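The refactoring above only hoists the copy/assign prohibition into a shared base class; embedder-defined resources keep the same virtuals. A minimal sketch of an ASCII resource on the new hierarchy (the class name and the malloc-backed storage are illustrative, not part of the API):

#include <stdlib.h>
#include <string.h>

class CStringResource : public v8::String::ExternalAsciiStringResource {
 public:
  explicit CStringResource(const char* str)
      : data_(strdup(str)), length_(strlen(str)) {}
  // V8 deletes the resource once the string is no longer reachable.
  virtual ~CStringResource() { free(data_); }
  virtual const char* data() const { return data_; }
  virtual size_t length() const { return length_; }
 private:
  char* data_;
  size_t length_;
};

// Usage:
//   v8::Local<v8::String> s =
//       v8::String::NewExternal(new CStringResource("hello"));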
diff --git a/samples/lineprocessor.cc b/samples/lineprocessor.cc
new file mode 100644
index 0000000..2e8092e
--- /dev/null
+++ b/samples/lineprocessor.cc
@@ -0,0 +1,427 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <v8.h>
+#include <v8-debug.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/**
+ * This sample program demonstrates certain aspects of debugging a
+ * standalone V8-based application.
+ *
+ * The program reads an input stream, processes it line by line and prints
+ * the result to the output. The actual processing is done by a custom
+ * JavaScript script, which is specified with command line parameters.
+ *
+ * The main cycle of the program sequentially reads lines from standard
+ * input, processes them and prints them to standard output until the input
+ * closes. There are two possible configurations of the main cycle.
+ *
+ * 1. The main cycle is on the C++ side. The program should be run with the
+ * --main-cycle-in-cpp option. The script must declare a function named
+ * "ProcessLine". The main cycle in C++ reads lines and calls this function
+ * to process each of them. This is a sample script:
+
+function ProcessLine(input_line) {
+ return ">>>" + input_line + "<<<";
+}
+
+ *
+ * 2. The main cycle is in JavaScript. The program should be run with the
+ * --main-cycle-in-js option. The script is run exactly once and is given an
+ * API of two global functions, "read_line" and "print". It should read the
+ * input and print the converted lines to the output itself. This is a
+ * sample script:
+
+while (true) {
+ var line = read_line();
+ if (!line) {
+ break;
+ }
+ var res = line + " | " + line;
+ print(res);
+}
+
+ *
+ * When run with the "-p" argument, the program starts the V8 Debugger Agent
+ * and allows a remote debugger to attach and debug the JavaScript code.
+ *
+ * Interesting aspects:
+ * 1. Waiting for the remote debugger to attach
+ * Normally the program compiles the custom script and immediately runs it.
+ * To debug the script from the very beginning, run this sample program with
+ * the "--wait-for-connection" command line parameter. This way V8 will
+ * suspend on the first statement and wait for a debugger to attach.
+ *
+ * 2. Unresponsive V8
+ * The V8 Debugger Agent holds a connection with the remote debugger, but it
+ * responds only while V8 is running some script. In particular, while this
+ * program is waiting for input, all requests from the debugger are deferred
+ * until V8 is called again. See how the "--callback" command-line parameter
+ * in this sample fixes this issue.
+ */
+
+enum MainCycleType {
+ CycleInCpp,
+ CycleInJs
+};
+
+const char* ToCString(const v8::String::Utf8Value& value);
+void ReportException(v8::TryCatch* handler);
+v8::Handle<v8::String> ReadFile(const char* name);
+v8::Handle<v8::String> ReadLine();
+
+v8::Handle<v8::Value> Print(const v8::Arguments& args);
+v8::Handle<v8::Value> ReadLine(const v8::Arguments& args);
+bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
+ bool report_exceptions);
+
+v8::Persistent<v8::Context> debug_message_context;
+
+
+void DispatchDebugMessages() {
+  // We are in some random thread. We should already have the v8::Locker
+  // acquired (we requested this when we registered this callback). We were
+  // called because new debug messages arrived; they may have already been
+  // processed, but we shouldn't worry about this.
+  //
+  // All we have to do is set the context and call ProcessDebugMessages.
+  //
+  // We should decide which V8 context to use here. This is important for
+  // the "evaluate" command, because it must be executed in some context.
+  // In our sample we have only one context, so there is nothing really to
+  // think about.
+ v8::Context::Scope scope(debug_message_context);
+
+ v8::Debug::ProcessDebugMessages();
+}
+
+
+int RunMain(int argc, char* argv[]) {
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::HandleScope handle_scope;
+
+ v8::Handle<v8::String> script_source(NULL);
+ v8::Handle<v8::Value> script_name(NULL);
+ int script_param_counter = 0;
+
+ int port_number = -1;
+ bool wait_for_connection = false;
+ bool support_callback = true;
+ MainCycleType cycle_type = CycleInCpp;
+
+ for (int i = 1; i < argc; i++) {
+ const char* str = argv[i];
+ if (strcmp(str, "-f") == 0) {
+ // Ignore any -f flags for compatibility with the other stand-
+ // alone JavaScript engines.
+ continue;
+ } else if (strcmp(str, "--callback") == 0) {
+ // TODO(548): implement this.
+ printf("Error: debugger agent callback is not supported yet.\n");
+ return 1;
+ } else if (strcmp(str, "--wait-for-connection") == 0) {
+ wait_for_connection = true;
+ } else if (strcmp(str, "--main-cycle-in-cpp") == 0) {
+ cycle_type = CycleInCpp;
+ } else if (strcmp(str, "--main-cycle-in-js") == 0) {
+ cycle_type = CycleInJs;
+ } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
+ port_number = atoi(argv[i + 1]);
+ i++;
+ } else if (strncmp(str, "--", 2) == 0) {
+ printf("Warning: unknown flag %s.\nTry --help for options\n", str);
+ } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
+ script_source = v8::String::New(argv[i + 1]);
+ script_name = v8::String::New("unnamed");
+ i++;
+ script_param_counter++;
+ } else {
+ // Use argument as a name of file to load.
+ script_source = ReadFile(str);
+ script_name = v8::String::New(str);
+ if (script_source.IsEmpty()) {
+ printf("Error reading '%s'\n", str);
+ return 1;
+ }
+ script_param_counter++;
+ }
+ }
+
+ if (script_param_counter == 0) {
+ printf("Script is not specified\n");
+ return 1;
+ }
+ if (script_param_counter != 1) {
+ printf("Only one script may be specified\n");
+ return 1;
+ }
+
+ // Create a template for the global object.
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+
+ // Bind the global 'print' function to the C++ Print callback.
+ global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+
+ if (cycle_type == CycleInJs) {
+    // Bind the global 'read_line' function to the C++ ReadLine callback.
+ global->Set(v8::String::New("read_line"),
+ v8::FunctionTemplate::New(ReadLine));
+ }
+
+  // Create a new execution environment containing the built-in
+  // functions.
+ v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
+ debug_message_context = v8::Persistent<v8::Context>::New(context);
+
+
+ // Enter the newly created execution environment.
+ v8::Context::Scope context_scope(context);
+
+ v8::Locker locker;
+
+ if (support_callback) {
+ v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
+ }
+
+ if (port_number != -1) {
+ const char* auto_break_param = "--debugger_auto_break";
+ v8::V8::SetFlagsFromString(auto_break_param, strlen(auto_break_param));
+ v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection);
+ }
+
+ bool report_exceptions = true;
+
+ v8::Handle<v8::Script> script;
+ {
+ // Compile script in try/catch context.
+ v8::TryCatch try_catch;
+ script = v8::Script::Compile(script_source, script_name);
+ if (script.IsEmpty()) {
+ // Print errors that happened during compilation.
+ if (report_exceptions)
+ ReportException(&try_catch);
+ return 1;
+ }
+ }
+
+ {
+ v8::TryCatch try_catch;
+
+ script->Run();
+ if (try_catch.HasCaught()) {
+ if (report_exceptions)
+ ReportException(&try_catch);
+ return 1;
+ }
+ }
+
+ if (cycle_type == CycleInCpp) {
+ bool res = RunCppCycle(script, v8::Context::GetCurrent(),
+ report_exceptions);
+ return !res;
+ } else {
+    // Nothing to do; the script has already run the whole cycle.
+ }
+ return 0;
+}
+
+
+bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
+ bool report_exceptions) {
+ v8::Locker lock;
+
+ v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
+ v8::Handle<v8::Value> process_val =
+ v8::Context::GetCurrent()->Global()->Get(fun_name);
+
+  // If there is no ProcessLine function, or if it is not a function,
+  // bail out.
+  if (!process_val->IsFunction()) {
+    printf("Error: Script does not declare 'ProcessLine' global function.\n");
+    return false;
+ }
+
+ // It is a function; cast it to a Function
+ v8::Handle<v8::Function> process_fun =
+ v8::Handle<v8::Function>::Cast(process_val);
+
+
+ while (!feof(stdin)) {
+ v8::HandleScope handle_scope;
+
+ v8::Handle<v8::String> input_line = ReadLine();
+ if (input_line == v8::Undefined()) {
+ continue;
+ }
+
+ const int argc = 1;
+ v8::Handle<v8::Value> argv[argc] = { input_line };
+
+ v8::Handle<v8::Value> result;
+ {
+ v8::TryCatch try_catch;
+ result = process_fun->Call(v8::Context::GetCurrent()->Global(),
+ argc, argv);
+ if (try_catch.HasCaught()) {
+ if (report_exceptions)
+ ReportException(&try_catch);
+ return false;
+ }
+ }
+ v8::String::Utf8Value str(result);
+ const char* cstr = ToCString(str);
+ printf("%s\n", cstr);
+ }
+
+ return true;
+}
+
+int main(int argc, char* argv[]) {
+ int result = RunMain(argc, argv);
+ v8::V8::Dispose();
+ return result;
+}
+
+
+// Extracts a C string from a V8 Utf8Value.
+const char* ToCString(const v8::String::Utf8Value& value) {
+ return *value ? *value : "<string conversion failed>";
+}
+
+
+// Reads a file into a v8 string.
+v8::Handle<v8::String> ReadFile(const char* name) {
+ FILE* file = fopen(name, "rb");
+ if (file == NULL) return v8::Handle<v8::String>();
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+ rewind(file);
+
+ char* chars = new char[size + 1];
+ chars[size] = '\0';
+ for (int i = 0; i < size;) {
+ int read = fread(&chars[i], 1, size - i, file);
+ i += read;
+ }
+ fclose(file);
+ v8::Handle<v8::String> result = v8::String::New(chars, size);
+ delete[] chars;
+ return result;
+}
+
+
+void ReportException(v8::TryCatch* try_catch) {
+ v8::HandleScope handle_scope;
+ v8::String::Utf8Value exception(try_catch->Exception());
+ const char* exception_string = ToCString(exception);
+ v8::Handle<v8::Message> message = try_catch->Message();
+ if (message.IsEmpty()) {
+ // V8 didn't provide any extra information about this error; just
+ // print the exception.
+ printf("%s\n", exception_string);
+ } else {
+ // Print (filename):(line number): (message).
+ v8::String::Utf8Value filename(message->GetScriptResourceName());
+ const char* filename_string = ToCString(filename);
+ int linenum = message->GetLineNumber();
+ printf("%s:%i: %s\n", filename_string, linenum, exception_string);
+ // Print line of source code.
+ v8::String::Utf8Value sourceline(message->GetSourceLine());
+ const char* sourceline_string = ToCString(sourceline);
+ printf("%s\n", sourceline_string);
+ // Print wavy underline (GetUnderline is deprecated).
+ int start = message->GetStartColumn();
+ for (int i = 0; i < start; i++) {
+ printf(" ");
+ }
+ int end = message->GetEndColumn();
+ for (int i = start; i < end; i++) {
+ printf("^");
+ }
+ printf("\n");
+ }
+}
+
+
+// The callback that is invoked by v8 whenever the JavaScript 'print'
+// function is called. Prints its arguments on stdout separated by
+// spaces and ending with a newline.
+v8::Handle<v8::Value> Print(const v8::Arguments& args) {
+ bool first = true;
+ for (int i = 0; i < args.Length(); i++) {
+ v8::HandleScope handle_scope;
+ if (first) {
+ first = false;
+ } else {
+ printf(" ");
+ }
+ v8::String::Utf8Value str(args[i]);
+ const char* cstr = ToCString(str);
+ printf("%s", cstr);
+ }
+ printf("\n");
+ fflush(stdout);
+ return v8::Undefined();
+}
+
+
+// The callback that is invoked by v8 whenever the JavaScript 'read_line'
+// function is called. Reads a string from standard input and returns it.
+v8::Handle<v8::Value> ReadLine(const v8::Arguments& args) {
+ if (args.Length() > 0) {
+ return v8::ThrowException(v8::String::New("Unexpected arguments"));
+ }
+ return ReadLine();
+}
+
+v8::Handle<v8::String> ReadLine() {
+ const int kBufferSize = 1024 + 1;
+ char buffer[kBufferSize];
+
+ char* res;
+ {
+ v8::Unlocker unlocker;
+    res = fgets(buffer, kBufferSize, stdin);
+ }
+ if (res == NULL) {
+ v8::Handle<v8::Primitive> t = v8::Undefined();
+ return reinterpret_cast<v8::Handle<v8::String>&>(t);
+ }
+  // Remove the trailing newline character, if any.
+ for (char* pos = buffer; *pos != '\0'; pos++) {
+ if (*pos == '\n') {
+ *pos = '\0';
+ break;
+ }
+ }
+ return v8::String::New(buffer);
+}
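Note how ReadLine above releases the V8 lock around the blocking fgets call, so the debugger agent thread can enter V8 while this program waits for input. The idiom in isolation (a sketch; it assumes the calling thread holds a v8::Locker):

char buffer[256];
char* res;
{
  v8::Unlocker unlocker;                       // temporarily release the lock
  res = fgets(buffer, sizeof(buffer), stdin);  // may block indefinitely
}                                              // the lock is re-acquired here
if (res == NULL) {
  // handle end of input
}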
diff --git a/src/SConscript b/src/SConscript
index 3b0df17..4eb8722 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -106,7 +106,6 @@
zone.cc
"""),
'arch:arm': Split("""
- arm/assembler-arm.cc
arm/builtins-arm.cc
arm/codegen-arm.cc
arm/constants-arm.cc
@@ -123,6 +122,12 @@
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
"""),
+ 'armvariant:arm': Split("""
+ arm/assembler-arm.cc
+ """),
+ 'armvariant:thumb2': Split("""
+ arm/assembler-thumb2.cc
+ """),
'arch:ia32': Split("""
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
@@ -230,7 +235,7 @@
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
- env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE"')
+ env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
# Build the standard platform-independent source files.
source_files = context.GetRelevantSources(SOURCES)
diff --git a/src/api.cc b/src/api.cc
index 93807a7..322c90f 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3082,81 +3082,13 @@
}
-static void DisposeExternalString(v8::Persistent<v8::Value> obj,
- void* parameter) {
- ENTER_V8;
- i::ExternalTwoByteString* str =
- i::ExternalTwoByteString::cast(*Utils::OpenHandle(*obj));
-
- // External symbols are deleted when they are pruned out of the symbol
- // table. Generally external symbols are not registered with the weak handle
- // callbacks unless they are upgraded to a symbol after being externalized.
- if (!str->IsSymbol()) {
- v8::String::ExternalStringResource* resource =
- reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
- if (resource != NULL) {
- const int total_size =
- static_cast<int>(resource->length() * sizeof(*resource->data()));
- i::Counters::total_external_string_memory.Decrement(total_size);
-
- // The object will continue to live in the JavaScript heap until the
- // handle is entirely cleaned out by the next GC. For example the
- // destructor for the resource below could bring it back to life again.
- // Which is why we make sure to not have a dangling pointer here.
- str->set_resource(NULL);
- delete resource;
- }
- }
-
- // In any case we do not need this handle any longer.
- obj.Dispose();
-}
-
-
-static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
- void* parameter) {
- ENTER_V8;
- i::ExternalAsciiString* str =
- i::ExternalAsciiString::cast(*Utils::OpenHandle(*obj));
-
- // External symbols are deleted when they are pruned out of the symbol
- // table. Generally external symbols are not registered with the weak handle
- // callbacks unless they are upgraded to a symbol after being externalized.
- if (!str->IsSymbol()) {
- v8::String::ExternalAsciiStringResource* resource =
- reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
- if (resource != NULL) {
- const int total_size =
- static_cast<int>(resource->length() * sizeof(*resource->data()));
- i::Counters::total_external_string_memory.Decrement(total_size);
-
- // The object will continue to live in the JavaScript heap until the
- // handle is entirely cleaned out by the next GC. For example the
- // destructor for the resource below could bring it back to life again.
- // Which is why we make sure to not have a dangling pointer here.
- str->set_resource(NULL);
- delete resource;
- }
- }
-
- // In any case we do not need this handle any longer.
- obj.Dispose();
-}
-
-
Local<String> v8::String::NewExternal(
v8::String::ExternalStringResource* resource) {
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
- const int total_size =
- static_cast<int>(resource->length() * sizeof(*resource->data()));
- i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalStringHandle(resource);
- i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
- i::GlobalHandles::MakeWeak(handle.location(),
- resource,
- &DisposeExternalString);
+ i::ExternalStringTable::AddString(*result);
return Utils::ToLocal(result);
}
@@ -3168,13 +3100,7 @@
i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- // Operation was successful and the string is not a symbol. In this case
- // we need to make sure that the we call the destructor for the external
- // resource when no strong references to the string remain.
- i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
- i::GlobalHandles::MakeWeak(handle.location(),
- resource,
- &DisposeExternalString);
+ i::ExternalStringTable::AddString(*obj);
}
return result;
}
@@ -3185,14 +3111,8 @@
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
- const int total_size =
- static_cast<int>(resource->length() * sizeof(*resource->data()));
- i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
- i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
- i::GlobalHandles::MakeWeak(handle.location(),
- resource,
- &DisposeExternalAsciiString);
+ i::ExternalStringTable::AddString(*result);
return Utils::ToLocal(result);
}
@@ -3205,13 +3125,7 @@
i::Handle<i::String> obj = Utils::OpenHandle(this);
bool result = obj->MakeExternal(resource);
if (result && !obj->IsSymbol()) {
- // Operation was successful and the string is not a symbol. In this case
- // we need to make sure that the we call the destructor for the external
- // resource when no strong references to the string remain.
- i::Handle<i::Object> handle = i::GlobalHandles::Create(*obj);
- i::GlobalHandles::MakeWeak(handle.location(),
- resource,
- &DisposeExternalAsciiString);
+ i::ExternalStringTable::AddString(*obj);
}
return result;
}
@@ -3755,7 +3669,6 @@
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8;
- HandleScope scope;
i::Debugger::SetMessageHandler(handler);
}
@@ -3777,10 +3690,10 @@
void Debug::SetDebugMessageDispatchHandler(
- DebugMessageDispatchHandler handler) {
+ DebugMessageDispatchHandler handler, bool provide_locker) {
EnsureInitialized("v8::Debug::SetDebugMessageDispatchHandler");
ENTER_V8;
- i::Debugger::SetDebugMessageDispatchHandler(handler);
+ i::Debugger::SetDebugMessageDispatchHandler(handler, provide_locker);
}
@@ -3827,9 +3740,14 @@
}
-bool Debug::EnableAgent(const char* name, int port) {
- return i::Debugger::StartAgent(name, port);
+bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
+ return i::Debugger::StartAgent(name, port, wait_for_connection);
}
+
+void Debug::ProcessDebugMessages() {
+ i::Execution::ProcessDebugMesssages(true);
+}
+
#endif // ENABLE_DEBUGGER_SUPPORT
namespace internal {
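The api.cc changes above replace the per-string weak handles (and their disposal callbacks) with a single registration call, i::ExternalStringTable::AddString. Conceptually, the table records every externalized string once and is swept after garbage collection, disposing the resources of strings that did not survive. The following is a sketch of that idea only, not V8's actual internals:

#include <vector>

struct String;                      // stand-in for the heap string type
bool IsAlive(String* str);          // hypothetical liveness check
void DisposeResource(String* str);  // hypothetical resource-dispose hook

std::vector<String*> external_strings;  // the registration table

void AddString(String* str) { external_strings.push_back(str); }

// After each GC: compact the table, keeping survivors and disposing the
// external resources of dead strings.
void SweepAfterGC() {
  size_t last = 0;
  for (size_t i = 0; i < external_strings.size(); i++) {
    if (IsAlive(external_strings[i])) {
      external_strings[last++] = external_strings[i];
    } else {
      DisposeResource(external_strings[i]);
    }
  }
  external_strings.resize(last);
}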
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 5f47cb7..fd2fcd3 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -229,14 +229,24 @@
Address Assembler::target_address_address_at(Address pc) {
- Instr instr = Memory::int32_at(pc);
- // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ Address target_pc = pc;
+ Instr instr = Memory::int32_at(target_pc);
+ // If we have a bx instruction, the instruction before the bx is
+ // what we need to patch.
+ static const int32_t kBxInstMask = 0x0ffffff0;
+ static const int32_t kBxInstPattern = 0x012fff10;
+ if ((instr & kBxInstMask) == kBxInstPattern) {
+ target_pc -= kInstrSize;
+ instr = Memory::int32_at(target_pc);
+ }
+ // Verify that the instruction to patch is a
+ // ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
- return pc + offset + 8;
+ return target_pc + offset + 8;
}
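The new kBxInstMask/kBxInstPattern pair matches the bx&lt;cond&gt; &lt;Rm&gt; encoding (cond in bits 31-28, the fixed pattern 0001 0010 1111 1111 1111 0001, then Rm in bits 3-0); the adjusted patching logic skips back over such a bx to find the ldr that loads the target. A self-contained sketch of the same test, handy for checking the constants:

#include <stdint.h>
#include <stdio.h>

static const uint32_t kBxInstMask    = 0x0ffffff0u;
static const uint32_t kBxInstPattern = 0x012fff10u;

// True if the 32-bit ARM word encodes bx<cond> <Rm>.
bool IsBxInstruction(uint32_t instr) {
  return (instr & kBxInstMask) == kBxInstPattern;
}

int main() {
  printf("%d\n", IsBxInstruction(0xe12fff1eu));  // bx lr       -> 1
  printf("%d\n", IsBxInstruction(0xe1a00000u));  // mov r0, r0  -> 0
  return 0;
}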
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index d924728..07da800 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -114,55 +114,55 @@
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
-Register s0 = { 0 };
-Register s1 = { 1 };
-Register s2 = { 2 };
-Register s3 = { 3 };
-Register s4 = { 4 };
-Register s5 = { 5 };
-Register s6 = { 6 };
-Register s7 = { 7 };
-Register s8 = { 8 };
-Register s9 = { 9 };
-Register s10 = { 10 };
-Register s11 = { 11 };
-Register s12 = { 12 };
-Register s13 = { 13 };
-Register s14 = { 14 };
-Register s15 = { 15 };
-Register s16 = { 16 };
-Register s17 = { 17 };
-Register s18 = { 18 };
-Register s19 = { 19 };
-Register s20 = { 20 };
-Register s21 = { 21 };
-Register s22 = { 22 };
-Register s23 = { 23 };
-Register s24 = { 24 };
-Register s25 = { 25 };
-Register s26 = { 26 };
-Register s27 = { 27 };
-Register s28 = { 28 };
-Register s29 = { 29 };
-Register s30 = { 30 };
-Register s31 = { 31 };
+SwVfpRegister s0 = { 0 };
+SwVfpRegister s1 = { 1 };
+SwVfpRegister s2 = { 2 };
+SwVfpRegister s3 = { 3 };
+SwVfpRegister s4 = { 4 };
+SwVfpRegister s5 = { 5 };
+SwVfpRegister s6 = { 6 };
+SwVfpRegister s7 = { 7 };
+SwVfpRegister s8 = { 8 };
+SwVfpRegister s9 = { 9 };
+SwVfpRegister s10 = { 10 };
+SwVfpRegister s11 = { 11 };
+SwVfpRegister s12 = { 12 };
+SwVfpRegister s13 = { 13 };
+SwVfpRegister s14 = { 14 };
+SwVfpRegister s15 = { 15 };
+SwVfpRegister s16 = { 16 };
+SwVfpRegister s17 = { 17 };
+SwVfpRegister s18 = { 18 };
+SwVfpRegister s19 = { 19 };
+SwVfpRegister s20 = { 20 };
+SwVfpRegister s21 = { 21 };
+SwVfpRegister s22 = { 22 };
+SwVfpRegister s23 = { 23 };
+SwVfpRegister s24 = { 24 };
+SwVfpRegister s25 = { 25 };
+SwVfpRegister s26 = { 26 };
+SwVfpRegister s27 = { 27 };
+SwVfpRegister s28 = { 28 };
+SwVfpRegister s29 = { 29 };
+SwVfpRegister s30 = { 30 };
+SwVfpRegister s31 = { 31 };
-Register d0 = { 0 };
-Register d1 = { 1 };
-Register d2 = { 2 };
-Register d3 = { 3 };
-Register d4 = { 4 };
-Register d5 = { 5 };
-Register d6 = { 6 };
-Register d7 = { 7 };
-Register d8 = { 8 };
-Register d9 = { 9 };
-Register d10 = { 10 };
-Register d11 = { 11 };
-Register d12 = { 12 };
-Register d13 = { 13 };
-Register d14 = { 14 };
-Register d15 = { 15 };
+DwVfpRegister d0 = { 0 };
+DwVfpRegister d1 = { 1 };
+DwVfpRegister d2 = { 2 };
+DwVfpRegister d3 = { 3 };
+DwVfpRegister d4 = { 4 };
+DwVfpRegister d5 = { 5 };
+DwVfpRegister d6 = { 6 };
+DwVfpRegister d7 = { 7 };
+DwVfpRegister d8 = { 8 };
+DwVfpRegister d9 = { 9 };
+DwVfpRegister d10 = { 10 };
+DwVfpRegister d11 = { 11 };
+DwVfpRegister d12 = { 12 };
+DwVfpRegister d13 = { 13 };
+DwVfpRegister d14 = { 14 };
+DwVfpRegister d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -1371,11 +1371,10 @@
// Support for VFP.
-void Assembler::fmdrr(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
+void Assembler::vmov(const DwVfpRegister dst,
+ const Register src1,
+ const Register src2,
+ const Condition cond) {
// Dm = <Rt,Rt2>.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
@@ -1387,11 +1386,10 @@
}
-void Assembler::fmrrd(const Register dst1,
- const Register dst2,
- const Register src,
- const SBit s,
- const Condition cond) {
+void Assembler::vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
+ const Condition cond) {
// <Rt,Rt2> = Dm.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
@@ -1403,9 +1401,8 @@
}
-void Assembler::fmsr(const Register dst,
+void Assembler::vmov(const SwVfpRegister dst,
const Register src,
- const SBit s,
const Condition cond) {
// Sn = Rt.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -1418,9 +1415,8 @@
}
-void Assembler::fmrs(const Register dst,
- const Register src,
- const SBit s,
+void Assembler::vmov(const Register dst,
+ const SwVfpRegister src,
const Condition cond) {
// Rt = Sn.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -1433,10 +1429,9 @@
}
-void Assembler::fsitod(const Register dst,
- const Register src,
- const SBit s,
- const Condition cond) {
+void Assembler::vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
@@ -1448,10 +1443,9 @@
}
-void Assembler::ftosid(const Register dst,
- const Register src,
- const SBit s,
- const Condition cond) {
+void Assembler::vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
// Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
@@ -1463,12 +1457,11 @@
}
-void Assembler::faddd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = faddd(Dn, Dm) double precision floating point addition.
+void Assembler::vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vadd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1479,12 +1472,11 @@
}
-void Assembler::fsubd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = fsubd(Dn, Dm) double precision floating point subtraction.
+void Assembler::vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vsub(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
@@ -1495,12 +1487,11 @@
}
-void Assembler::fmuld(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = fmuld(Dn, Dm) double precision floating point multiplication.
+void Assembler::vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vmul(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
@@ -1511,12 +1502,11 @@
}
-void Assembler::fdivd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s,
- const Condition cond) {
- // Dd = fdivd(Dn, Dm) double precision floating point division.
+void Assembler::vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vdiv(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
@@ -1527,8 +1517,8 @@
}
-void Assembler::fcmp(const Register src1,
- const Register src2,
+void Assembler::vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
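For reference, the renames map one-to-one onto the old mnemonics, and the unused SBit argument disappears from everything except vcmp. A usage sketch, assuming an Assembler instance assm with VFP3 available:

assm.vmov(s0, r0);       // Sn = Rt            (was fmsr)
assm.vcvt(d0, s0);       // Dd = (double)Sm    (was fsitod)
assm.vmov(d1, r2, r3);   // Dm = <Rt,Rt2>      (was fmdrr)
assm.vadd(d0, d0, d1);   // Dd = Dn + Dm       (was faddd)
assm.vcvt(s1, d0);       // Sd = (int)Dm       (was ftosid)
assm.vmov(r0, s1);       // Rt = Sn            (was fmrs)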
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 86bc18a..cd53dd6 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -103,57 +103,94 @@
extern Register lr;
extern Register pc;
-// Support for VFP registers s0 to s32 (d0 to d16).
-// Note that "sN:sM" is the same as "dN/2".
-extern Register s0;
-extern Register s1;
-extern Register s2;
-extern Register s3;
-extern Register s4;
-extern Register s5;
-extern Register s6;
-extern Register s7;
-extern Register s8;
-extern Register s9;
-extern Register s10;
-extern Register s11;
-extern Register s12;
-extern Register s13;
-extern Register s14;
-extern Register s15;
-extern Register s16;
-extern Register s17;
-extern Register s18;
-extern Register s19;
-extern Register s20;
-extern Register s21;
-extern Register s22;
-extern Register s23;
-extern Register s24;
-extern Register s25;
-extern Register s26;
-extern Register s27;
-extern Register s28;
-extern Register s29;
-extern Register s30;
-extern Register s31;
-extern Register d0;
-extern Register d1;
-extern Register d2;
-extern Register d3;
-extern Register d4;
-extern Register d5;
-extern Register d6;
-extern Register d7;
-extern Register d8;
-extern Register d9;
-extern Register d10;
-extern Register d11;
-extern Register d12;
-extern Register d13;
-extern Register d14;
-extern Register d15;
+// Single word VFP register.
+struct SwVfpRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 32; }
+ bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister {
+  // Supports d0 to d15; can later be extended to d31.
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+extern SwVfpRegister s0;
+extern SwVfpRegister s1;
+extern SwVfpRegister s2;
+extern SwVfpRegister s3;
+extern SwVfpRegister s4;
+extern SwVfpRegister s5;
+extern SwVfpRegister s6;
+extern SwVfpRegister s7;
+extern SwVfpRegister s8;
+extern SwVfpRegister s9;
+extern SwVfpRegister s10;
+extern SwVfpRegister s11;
+extern SwVfpRegister s12;
+extern SwVfpRegister s13;
+extern SwVfpRegister s14;
+extern SwVfpRegister s15;
+extern SwVfpRegister s16;
+extern SwVfpRegister s17;
+extern SwVfpRegister s18;
+extern SwVfpRegister s19;
+extern SwVfpRegister s20;
+extern SwVfpRegister s21;
+extern SwVfpRegister s22;
+extern SwVfpRegister s23;
+extern SwVfpRegister s24;
+extern SwVfpRegister s25;
+extern SwVfpRegister s26;
+extern SwVfpRegister s27;
+extern SwVfpRegister s28;
+extern SwVfpRegister s29;
+extern SwVfpRegister s30;
+extern SwVfpRegister s31;
+
+extern DwVfpRegister d0;
+extern DwVfpRegister d1;
+extern DwVfpRegister d2;
+extern DwVfpRegister d3;
+extern DwVfpRegister d4;
+extern DwVfpRegister d5;
+extern DwVfpRegister d6;
+extern DwVfpRegister d7;
+extern DwVfpRegister d8;
+extern DwVfpRegister d9;
+extern DwVfpRegister d10;
+extern DwVfpRegister d11;
+extern DwVfpRegister d12;
+extern DwVfpRegister d13;
+extern DwVfpRegister d14;
+extern DwVfpRegister d15;
+
// Coprocessor register
struct CRegister {
@@ -759,55 +796,45 @@
// However, some simple modifications can allow
// these APIs to support D16 to D31.
- void fmdrr(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fmrrd(const Register dst1,
- const Register dst2,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fmsr(const Register dst,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fmrs(const Register dst,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fsitod(const Register dst,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void ftosid(const Register dst,
- const Register src,
- const SBit s = LeaveCC,
- const Condition cond = al);
-
- void faddd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fsubd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fmuld(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fdivd(const Register dst,
- const Register src1,
- const Register src2,
- const SBit s = LeaveCC,
- const Condition cond = al);
- void fcmp(const Register src1,
+ void vmov(const DwVfpRegister dst,
+ const Register src1,
const Register src2,
+ const Condition cond = al);
+ void vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
+ const Condition cond = al);
+ void vmov(const SwVfpRegister dst,
+ const Register src,
+ const Condition cond = al);
+ void vmov(const Register dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
+
+ void vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
const SBit s = LeaveCC,
const Condition cond = al);
void vmrs(const Register dst,
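The payoff of the dedicated SwVfpRegister/DwVfpRegister structs is that register-class mistakes now fail at compile time instead of assembling mis-encoded instructions. A sketch:

assm.vadd(d0, d1, d2);     // OK: all operands are DwVfpRegister
// assm.vadd(d0, r1, d2);  // no longer compiles: r1 is a core Register
// assm.vmov(s0, d0);      // no longer compiles: no Sw <- Dw overload
//                         // (conversions go through vcvt)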
diff --git a/src/arm/assembler-thumb2-inl.h b/src/arm/assembler-thumb2-inl.h
new file mode 100644
index 0000000..3808ef0
--- /dev/null
+++ b/src/arm/assembler-thumb2-inl.h
@@ -0,0 +1,267 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
+#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
+
+#include "arm/assembler-thumb2.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
+
+
+void RelocInfo::apply(intptr_t delta) {
+ if (RelocInfo::IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p += delta; // relocate entry
+ }
+ // We do not use pc relative addressing on ARM, so there is
+ // nothing else to do.
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+}
+
+
+Object* RelocInfo::call_object() {
+ return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(IsPatchedReturnSequence());
+  // The two-instruction offset assumes a patched return sequence.
+ ASSERT(IsJSReturn(rmode()));
+ return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ *call_object_address() = target;
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+ // On ARM a "call instruction" is actually two instructions.
+ // mov lr, pc
+ // ldr pc, [pc, #XXX]
+ return (Assembler::instr_at(pc_) == kMovLrPc)
+ && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
+ == kLdrPCPattern);
+}
+
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm32_ = immediate;
+ rmode_ = rmode;
+}
+
+
+Operand::Operand(const char* s) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(s);
+ rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Operand::Operand(Object** opp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(opp);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Context** cpp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(cpp);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+}
+
+
+bool Operand::is_reg() const {
+ return rm_.is_valid() &&
+ rs_.is(no_reg) &&
+ shift_op_ == LSL &&
+ shift_imm_ == 0;
+}
+
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() >= next_buffer_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+}
+
+
+Address Assembler::target_address_address_at(Address pc) {
+ Instr instr = Memory::int32_at(pc);
+ // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+ int offset = instr & 0xfff; // offset_12 is unsigned
+ if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
+ // Verify that the constant pool comes after the instruction referencing it.
+ ASSERT(offset >= -4);
+ return pc + offset + 8;
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(target_address_address_at(pc));
+}
+
+
+void Assembler::set_target_at(Address constant_pool_entry,
+ Address target) {
+ Memory::Address_at(constant_pool_entry) = target;
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(target_address_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to flush the instruction cache
+ // after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction was actually patched by the assignment
+ // above; the target address is not part of an instruction, it is patched in
+ // the constant pool and is read via a data access; the instruction accessing
+ // this address in the constant pool remains unchanged.
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_
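One subtlety in the file above: NegateCondition relies on ARM condition codes coming in complementary pairs that differ only in the lowest bit of the 4-bit condition field, so xor-ing with the ne encoding (0001) negates any condition except al. A sketch of the trick on the bare 4-bit values (the real code stores them pre-shifted into bits 31-28):

#include <stdio.h>

// The 4-bit ARM condition field; complementary conditions are adjacent.
enum Condition { eq = 0, ne = 1, cs = 2, cc = 3, ge = 10, lt = 11, al = 14 };

Condition NegateCondition(Condition cond) {
  return static_cast<Condition>(cond ^ ne);  // flip the pairing bit
}

int main() {
  printf("%d\n", NegateCondition(eq) == ne);  // 1
  printf("%d\n", NegateCondition(lt) == ge);  // 1
  return 0;
}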
diff --git a/src/arm/assembler-thumb2.cc b/src/arm/assembler-thumb2.cc
new file mode 100644
index 0000000..6c2b903
--- /dev/null
+++ b/src/arm/assembler-thumb2.cc
@@ -0,0 +1,1821 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "arm/assembler-thumb2-inl.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// Safe default is no features.
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
+void CpuFeatures::Probe() {
+ // If the compiler is allowed to use vfp then we can use vfp too in our
+ // code generation.
+#if !defined(__arm__)
+ // For the simulator=arm build, always use VFP since the arm simulator has
+ // VFP support.
+ supported_ |= 1u << VFP3;
+#else
+ if (Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
+
+ if (OS::ArmCpuHasFeature(VFP3)) {
+ // This implementation also sets the VFP flags if
+ // runtime detection of VFP returns true.
+ supported_ |= 1u << VFP3;
+ found_by_runtime_probing_ |= 1u << VFP3;
+ }
+#endif
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and CRegister
+
+Register no_reg = { -1 };
+
+Register r0 = { 0 };
+Register r1 = { 1 };
+Register r2 = { 2 };
+Register r3 = { 3 };
+Register r4 = { 4 };
+Register r5 = { 5 };
+Register r6 = { 6 };
+Register r7 = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register fp = { 11 };
+Register ip = { 12 };
+Register sp = { 13 };
+Register lr = { 14 };
+Register pc = { 15 };
+
+
+CRegister no_creg = { -1 };
+
+CRegister cr0 = { 0 };
+CRegister cr1 = { 1 };
+CRegister cr2 = { 2 };
+CRegister cr3 = { 3 };
+CRegister cr4 = { 4 };
+CRegister cr5 = { 5 };
+CRegister cr6 = { 6 };
+CRegister cr7 = { 7 };
+CRegister cr8 = { 8 };
+CRegister cr9 = { 9 };
+CRegister cr10 = { 10 };
+CRegister cr11 = { 11 };
+CRegister cr12 = { 12 };
+CRegister cr13 = { 13 };
+CRegister cr14 = { 14 };
+CRegister cr15 = { 15 };
+
+// Support for the VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+SwVfpRegister s0 = { 0 };
+SwVfpRegister s1 = { 1 };
+SwVfpRegister s2 = { 2 };
+SwVfpRegister s3 = { 3 };
+SwVfpRegister s4 = { 4 };
+SwVfpRegister s5 = { 5 };
+SwVfpRegister s6 = { 6 };
+SwVfpRegister s7 = { 7 };
+SwVfpRegister s8 = { 8 };
+SwVfpRegister s9 = { 9 };
+SwVfpRegister s10 = { 10 };
+SwVfpRegister s11 = { 11 };
+SwVfpRegister s12 = { 12 };
+SwVfpRegister s13 = { 13 };
+SwVfpRegister s14 = { 14 };
+SwVfpRegister s15 = { 15 };
+SwVfpRegister s16 = { 16 };
+SwVfpRegister s17 = { 17 };
+SwVfpRegister s18 = { 18 };
+SwVfpRegister s19 = { 19 };
+SwVfpRegister s20 = { 20 };
+SwVfpRegister s21 = { 21 };
+SwVfpRegister s22 = { 22 };
+SwVfpRegister s23 = { 23 };
+SwVfpRegister s24 = { 24 };
+SwVfpRegister s25 = { 25 };
+SwVfpRegister s26 = { 26 };
+SwVfpRegister s27 = { 27 };
+SwVfpRegister s28 = { 28 };
+SwVfpRegister s29 = { 29 };
+SwVfpRegister s30 = { 30 };
+SwVfpRegister s31 = { 31 };
+
+DwVfpRegister d0 = { 0 };
+DwVfpRegister d1 = { 1 };
+DwVfpRegister d2 = { 2 };
+DwVfpRegister d3 = { 3 };
+DwVfpRegister d4 = { 4 };
+DwVfpRegister d5 = { 5 };
+DwVfpRegister d6 = { 6 };
+DwVfpRegister d7 = { 7 };
+DwVfpRegister d8 = { 8 };
+DwVfpRegister d9 = { 9 };
+DwVfpRegister d10 = { 10 };
+DwVfpRegister d11 = { 11 };
+DwVfpRegister d12 = { 12 };
+DwVfpRegister d13 = { 13 };
+DwVfpRegister d14 = { 14 };
+DwVfpRegister d15 = { 15 };
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ Instr* pc = reinterpret_cast<Instr*>(pc_);
+ Instr* instr = reinterpret_cast<Instr*>(instructions);
+ for (int i = 0; i < instruction_count; i++) {
+ *(pc + i) = *(instr + i);
+ }
+
+ // Indicate that code has changed.
+ CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-thumb2-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+ rm_ = no_reg;
+  // Verify that all Objects referred to by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // no relocation needed
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
+ ASSERT(is_uint5(shift_imm));
+ ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ if (shift_op == RRX) {
+ // encoded as ROR with shift_imm == 0
+ ASSERT(shift_imm == 0);
+ shift_op_ = ROR;
+ shift_imm_ = 0;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
+ ASSERT(shift_op != RRX);
+ rm_ = rm;
+  shift_op_ = shift_op;
+  rs_ = rs;
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
+ rn_ = rn;
+ rm_ = no_reg;
+ offset_ = offset;
+ am_ = am;
+}
+
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+ am_ = am;
+}
+
+
+MemOperand::MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am) {
+ ASSERT(is_uint5(shift_imm));
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ am_ = am;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Instruction encoding bits
+enum {
+ H = 1 << 5, // halfword (or byte)
+ S6 = 1 << 6, // signed (or unsigned)
+ L = 1 << 20, // load (or store)
+ S = 1 << 20, // set condition code (or leave unchanged)
+ W = 1 << 21, // writeback base register (or leave unchanged)
+ A = 1 << 21, // accumulate in multiply instruction (or not)
+ B = 1 << 22, // unsigned byte (or word)
+ N = 1 << 22, // long (or short)
+ U = 1 << 23, // positive (or negative) offset/index
+ P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
+ I = 1 << 25, // immediate shifter operand (or not)
+
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B6 = 1 << 6,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B9 = 1 << 9,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B18 = 1 << 18,
+ B19 = 1 << 19,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+
+ // Instruction bit masks
+ RdMask = 15 << 12, // in str instruction
+ CondMask = 15 << 28,
+ CoprocessorMask = 15 << 8,
+ OpCodeMask = 15 << 21, // in data-processing instructions
+ Imm24Mask = (1 << 24) - 1,
+ Off12Mask = (1 << 12) - 1,
+ // Reserved condition
+ nv = 15 << 28
+};
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+static const Instr kPopInstruction =
+ al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+static const Instr kPushRegPattern =
+ al | B26 | 4 | NegPreIndex | sp.code() * B16;
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+static const Instr kPopRegPattern =
+ al | B26 | L | 4 | PostIndex | sp.code() * B16;
+// mov lr, pc
+const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+// ldr pc, [pc, #XXX]
+const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
+
+// spare_buffer_
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // setup buffer pointers
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ num_prinfo_ = 0;
+ next_buffer_check_ = 0;
+ no_const_pool_before_ = 0;
+ last_const_pool_end_ = 0;
+ last_bound_pos_ = 0;
+ current_statement_position_ = RelocInfo::kNoPosition;
+ current_position_ = RelocInfo::kNoPosition;
+ written_statement_position_ = current_statement_position_;
+ written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // emit constant pool if necessary
+ CheckConstPool(true, false);
+ ASSERT(num_prinfo_ == 0);
+
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
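+//
+// While a label is linked, each branch referring to it stores the position
+// of the previous use in its imm24 field; bind_to() walks this chain and
+// patches every branch with the final target.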
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
+
+
+int Assembler::target_at(int pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~Imm24Mask) == 0) {
+ // Emitted label constant, not part of a branch.
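+    // target_at_put() stored this value biased by the Code object header
+    // size; subtract that bias to recover the code position.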
+ return instr - (Code::kHeaderSize - kHeapObjectTag);
+ }
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ int imm26 = ((instr & Imm24Mask) << 8) >> 6;
+ if ((instr & CondMask) == nv && (instr & B24) != 0)
+ // blx uses bit 24 to encode bit 2 of imm26
+ imm26 += 2;
+
+ return pos + kPcLoadDelta + imm26;
+}
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+ Instr instr = instr_at(pos);
+ if ((instr & ~Imm24Mask) == 0) {
+ ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code* of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+ int imm26 = target_pos - (pos + kPcLoadDelta);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ if ((instr & CondMask) == nv) {
+ // blx uses bit 24 to encode bit 2 of imm26
+ ASSERT((imm26 & 1) == 0);
+ instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+ } else {
+ ASSERT((imm26 & 3) == 0);
+ instr &= ~Imm24Mask;
+ }
+ int imm24 = imm26 >> 2;
+ ASSERT(is_int24(imm24));
+ instr_at_put(pos, instr | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~Imm24Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
+ int cond = instr & CondMask;
+ const char* b;
+ const char* c;
+ if (cond == nv) {
+ b = "blx";
+ c = "";
+ } else {
+ if ((instr & B24) != 0)
+ b = "bl";
+ else
+ b = "b";
+
+ switch (cond) {
+ case eq: c = "eq"; break;
+ case ne: c = "ne"; break;
+ case hs: c = "hs"; break;
+ case lo: c = "lo"; break;
+ case mi: c = "mi"; break;
+ case pl: c = "pl"; break;
+ case vs: c = "vs"; break;
+ case vc: c = "vc"; break;
+ case hi: c = "hi"; break;
+ case ls: c = "ls"; break;
+ case ge: c = "ge"; break;
+ case lt: c = "lt"; break;
+ case gt: c = "gt"; break;
+ case le: c = "le"; break;
+ case al: c = ""; break;
+ default:
+ c = "";
+ UNREACHABLE();
+ }
+ }
+ PrintF("%s%s\n", b, c);
+ }
+ next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ next(L); // call next before overwriting link with target at fixup_pos
+ target_at_put(fixup_pos, pos);
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+ if (appendix->is_linked()) {
+ if (L->is_linked()) {
+ // append appendix to L's list
+ int fixup_pos;
+ int link = L->pos();
+ do {
+ fixup_pos = link;
+ link = target_at(fixup_pos);
+ } while (link > 0);
+ ASSERT(link == kEndOfChain);
+ target_at_put(fixup_pos, appendix->pos());
+ } else {
+ // L is empty, simply use appendix
+ *L = *appendix;
+ }
+ }
+ appendix->Unuse(); // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+ ASSERT(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+ ASSERT(L->is_linked());
+ int link = target_at(L->pos());
+ if (link > 0) {
+ L->link_to(link);
+ } else {
+ ASSERT(link == kEndOfChain);
+ L->Unuse();
+ }
+}
+
+
+// Low-level code emission routines depending on the addressing mode
+static bool fits_shifter(uint32_t imm32,
+ uint32_t* rotate_imm,
+ uint32_t* immed_8,
+ Instr* instr) {
+ // imm32 must be unsigned
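+  // An ARM shifter-operand immediate is an 8-bit value rotated right by an
+  // even amount (2*rotate_imm). For example, 0x100 is encodable as
+  // immed_8 = 1 with rotate_imm = 12, since 1 rotated right by 24 is 0x100.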
+ for (int rot = 0; rot < 16; rot++) {
+ uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+ if ((imm8 <= 0xff)) {
+ *rotate_imm = rot;
+ *immed_8 = imm8;
+ return true;
+ }
+ }
+ // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
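+  // (mov and mvn have opcodes 13 and 15, so XOR with 2 toggles between them;
+  // e.g. mov rd, #0xFFFFFF00 becomes mvn rd, #0xFF.)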
+ if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= 0x2*B21;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+// We have to use the temporary register for things that can be relocated even
+// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
+// space. There is no guarantee that the relocated location can be similarly
+// encoded.
+static bool MustUseIp(RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ return Serializer::enabled();
+ } else if (rmode == RelocInfo::NONE) {
+ return false;
+ }
+ return true;
+}
+
+
+void Assembler::addrmod1(Instr instr,
+ Register rn,
+ Register rd,
+ const Operand& x) {
+ CheckBuffer();
+ ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+ if (!x.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if (MustUseIp(x.rmode_) ||
+ !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, so load
+ // it first to register ip and change the original instruction to use ip.
+ // However, if the original instruction is a 'mov rd, x' (not setting the
+ // condition code), then replace it with a 'ldr rd, [pc]'
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
+ Condition cond = static_cast<Condition>(instr & CondMask);
+ if ((instr & ~CondMask) == 13*B21) { // mov, S not set
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ ldr(ip, MemOperand(pc, 0), cond);
+ addrmod1(instr, rn, rd, Operand(ip));
+ }
+ return;
+ }
+ instr |= I | rotate_imm*B8 | immed_8;
+ } else if (!x.rs_.is_valid()) {
+ // immediate shift
+ instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ } else {
+ // register shift
+ ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+ instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+ }
+ emit(instr | rn.code()*B16 | rd.code()*B12);
+ if (rn.is(pc) || x.rm_.is(pc))
+ // block constant pool emission for one instruction after reading pc
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+}
+
+
+void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | B | L)) == B26);
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_12 = x.offset_;
+ if (offset_12 < 0) {
+ offset_12 = -offset_12;
+ am ^= U;
+ }
+ if (!is_uint12(offset_12)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_12 >= 0); // no masking needed
+ instr |= offset_12;
+ } else {
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
+    // offset; the constructors make sure that both shift_imm_ and shift_op_
+    // are initialized.
+ ASSERT(!x.rm_.is(pc));
+ instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+ ASSERT(x.rn_.is_valid());
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_8 = x.offset_;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ if (!is_uint8(offset_8)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_8 >= 0); // no masking needed
+ instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+ } else if (x.shift_imm_ != 0) {
+ // scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ } else {
+ // register offset
+ ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
+ instr |= x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+ ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+ ASSERT(rl != 0);
+ ASSERT(!rn.is(pc));
+ emit(instr | rn.code()*B16 | rl);
+}
+
+
+void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+ // unindexed addressing is not encoded by this function
+ ASSERT_EQ((B27 | B26),
+ (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+ ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+ int am = x.am_;
+ int offset_8 = x.offset_;
+ ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
+ offset_8 >>= 2;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+
+  // Post-indexed addressing requires W == 1; this differs from addrmod2/3.
+ if ((am & P) == 0)
+ am |= W;
+
+ ASSERT(offset_8 >= 0); // no masking needed
+ emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+}
+
+
+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ target_pos = kEndOfChain;
+ }
+ L->link_to(pc_offset());
+ }
+
+ // Block the emission of the constant pool, since the branch instruction must
+ // be emitted at the pc offset recorded by the label
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
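+  // Branch targets are encoded relative to the pc value at execution time,
+  // which is the branch's own address plus kPcLoadDelta (8 on ARM).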
+ return target_pos - (pc_offset() + kPcLoadDelta);
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ target_pos = kEndOfChain;
+ }
+ L->link_to(at_offset);
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ }
+}
+
+
+// Branch instructions
+void Assembler::b(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+
+ if (cond == al)
+ // dead code is a good location to emit the constant pool
+ CheckConstPool(false, false);
+}
+
+
+void Assembler::bl(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(int branch_offset) { // v5 and above
+ WriteRecordedPositions();
+ ASSERT((branch_offset & 1) == 0);
+ int h = ((branch_offset & 2) >> 1)*B24;
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(Register target, Condition cond) { // v5 and above
+ WriteRecordedPositions();
+ ASSERT(!target.is(pc));
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+}
+
+
+void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+ WriteRecordedPositions();
+ ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+}
+
+
+// Data-processing instructions
+void Assembler::and_(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 0*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::eor(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 1*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sub(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 2*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsb(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 3*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::add(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 4*B21 | s, src1, dst, src2);
+
+ // Eliminate pattern: push(r), pop()
+ // str(src, MemOperand(sp, 4, NegPreIndex), al);
+ // add(sp, sp, Operand(kPointerSize));
+ // Both instructions can be eliminated.
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // pattern
+ instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+ (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::adc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 5*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sbc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 6*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 7*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 8*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 9*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 10*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 11*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::orr(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 12*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+ if (dst.is(pc)) {
+ WriteRecordedPositions();
+ }
+ addrmod1(cond | 13*B21 | s, r0, dst, src);
+}
+
+
+void Assembler::bic(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 14*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 15*B21 | s, r0, dst, src);
+}
+
+
+// Multiply instructions
+void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::mul(Register dst, Register src1, Register src2,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ // dst goes in bits 16-19 for this instruction!
+ emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH));
+ emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Miscellaneous arithmetic instructions
+void Assembler::clz(Register dst, Register src, Condition cond) {
+ // v5 and above.
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
+ 15*B8 | B4 | src.code());
+}
+
+
+// Status register access instructions
+void Assembler::mrs(Register dst, SRegister s, Condition cond) {
+ ASSERT(!dst.is(pc));
+ emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+}
+
+
+void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
+ Condition cond) {
+ ASSERT(fields >= B16 && fields < B20); // at least one field set
+ Instr instr;
+ if (!src.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if (MustUseIp(src.rmode_) ||
+ !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+ // immediate operand cannot be encoded, load it first to register ip
+ RecordRelocInfo(src.rmode_, src.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ msr(fields, Operand(ip), cond);
+ return;
+ }
+ instr = I | rotate_imm*B8 | immed_8;
+ } else {
+ ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
+ instr = src.rm_.code();
+ }
+ emit(cond | instr | B24 | B21 | fields | 15*B12);
+}
+
+
+// Load/Store instructions
+void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+ if (dst.is(pc)) {
+ WriteRecordedPositions();
+ }
+ addrmod2(cond | B26 | L, dst, src);
+
+ // Eliminate pattern: push(r), pop(r)
+ // str(r, MemOperand(sp, 4, NegPreIndex), al)
+ // ldr(r, MemOperand(sp, 4, PostIndex), al)
+ // Both instructions can be eliminated.
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // pattern
+ instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
+ instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26, src, dst);
+
+ // Eliminate pattern: pop(), push(r)
+ // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
+ // -> str r, [sp, 0], al
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
+ instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
+ pc_ -= 2 * kInstrSize;
+ emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | B | L, dst, src);
+}
+
+
+void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26 | B, src, dst);
+}
+
+
+void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | H | B4, dst, src);
+}
+
+
+void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
+ addrmod3(cond | B7 | H | B4, src, dst);
+}
+
+
+void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | B4, dst, src);
+}
+
+
+void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+}
+
+
+// Load/Store multiple instructions
+void Assembler::ldm(BlockAddrMode am,
+ Register base,
+ RegList dst,
+ Condition cond) {
+  // ABI stack constraint: "ldmxx base, {..sp..}" with base != sp is not
+  // restartable.
+ ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+
+ addrmod4(cond | B27 | am | L, base, dst);
+
+ // emit the constant pool after a function return implemented by ldm ..{..pc}
+ if (cond == al && (dst & pc.bit()) != 0) {
+ // There is a slight chance that the ldm instruction was actually a call,
+ // in which case it would be wrong to return into the constant pool; we
+ // recognize this case by checking if the emission of the pool was blocked
+ // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
+ // the case, we emit a jump over the pool.
+ CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
+ }
+}
+
+
+void Assembler::stm(BlockAddrMode am,
+ Register base,
+ RegList src,
+ Condition cond) {
+ addrmod4(cond | B27 | am, base, src);
+}
+
+
+// Semaphore instructions
+void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+void Assembler::swpb(Register dst,
+ Register src,
+ Register base,
+ Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+// Exception-generating instructions and debugging support
+void Assembler::stop(const char* msg) {
+#if !defined(__arm__)
+ // The simulator handles these special instructions and stops execution.
+ emit(15 << 28 | ((intptr_t) msg));
+#else
+ // Just issue a simple break instruction for now. Alternatively we could use
+ // the swi(0x9f0001) instruction on Linux.
+ bkpt(0);
+#endif
+}
+
+
+void Assembler::bkpt(uint32_t imm16) { // v5 and above
+ ASSERT(is_uint16(imm16));
+ emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+}
+
+
+void Assembler::swi(uint32_t imm24, Condition cond) {
+ ASSERT(is_uint24(imm24));
+ emit(cond | 15*B24 | imm24);
+}
+
+
+// Coprocessor instructions
+void Assembler::cdp(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
+ crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+}
+
+
+void Assembler::cdp2(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mcr(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mcr2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mrc(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mrc2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& dst,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+                     CRegister crd,
+ const MemOperand& dst,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Support for VFP.
+void Assembler::vmov(const DwVfpRegister dst,
+ const Register src1,
+ const Register src2,
+ const Condition cond) {
+ // Dm = <Rt,Rt2>.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src1.is(pc) && !src2.is(pc));
+ emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
+ src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+}
+
+
+void Assembler::vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // <Rt,Rt2> = Dm.
+ // Instruction details available in ARM DDI 0406A, A8-646.
+ // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
+ // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
+ dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+}
+
+
+void Assembler::vmov(const SwVfpRegister dst,
+ const Register src,
+ const Condition cond) {
+ // Sn = Rt.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!src.is(pc));
+ emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
+ src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+}
+
+
+void Assembler::vmov(const Register dst,
+ const SwVfpRegister src,
+ const Condition cond) {
+ // Rt = Sn.
+ // Instruction details available in ARM DDI 0406A, A8-642.
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
+ // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ ASSERT(!dst.is(pc));
+ emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
+ dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+}
+
+
+void Assembler::vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond) {
+ // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
+ dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
+ (0x1 & src.code())*B5 | (src.code() >> 1));
+}
+
+
+void Assembler::vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
+ // Instruction details available in ARM DDI 0406A, A8-576.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | B23 | (0x1 & dst.code())*B22 |
+ 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
+ 0x5*B9 | B8 | B7 | B6 | src.code());
+}
+
+
+void Assembler::vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vadd(Dn, Dm) double precision floating point addition.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-536.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vsub(Dn, Dm) double precision floating point subtraction.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vmul(Dn, Dm) double precision floating point multiplication.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-784.
+ // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Dd = vdiv(Dn, Dm) double precision floating point division.
+  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
+ // Instruction details available in ARM DDI 0406A, A8-584.
+ // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
+ dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+}
+
+
+void Assembler::vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const SBit s,
+ const Condition cond) {
+ // vcmp(Dd, Dm) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406A, A8-570.
+ // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
+ // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
+ src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+}
+
+
+void Assembler::vmrs(Register dst, Condition cond) {
+ // Instruction details available in ARM DDI 0406A, A8-652.
+ // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
+ // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ emit(cond | 0xE*B24 | 0xF*B20 | B16 |
+ dst.code()*B12 | 0xA*B8 | B4);
+}
+
+
+// Pseudo instructions
+void Assembler::lea(Register dst,
+ const MemOperand& x,
+ SBit s,
+ Condition cond) {
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.offset_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.offset_), s, cond);
+ } else {
+    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
+    // offset; the constructors make sure that both shift_imm_ and
+    // shift_op_ are initialized.
+ ASSERT(!x.rm_.is(pc));
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ }
+}
+
+
+bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
+ uint32_t dummy1;
+ uint32_t dummy2;
+ return fits_shifter(imm32, &dummy1, &dummy2, NULL);
+}
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+ BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+// Debugging
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
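+  // Growth policy: start at 4KB, double until 1MB, then grow in 1MB steps.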
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else if (buffer_size_ < 1*MB) {
+ desc.buffer_size = 2*buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1*MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+  // None of our relocation types are pc-relative pointing outside the code
+  // buffer, nor pc-absolute pointing inside the code buffer, so there is no
+  // need to relocate any emitted relocation entries.
+
+ // relocate pending relocation entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+ }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ // Adjust code for new modes
+ ASSERT(RelocInfo::IsJSReturn(rmode)
+ || RelocInfo::IsComment(rmode)
+ || RelocInfo::IsPosition(rmode));
+ // these modes do not need an entry in the constant pool
+ } else {
+ ASSERT(num_prinfo_ < kMaxNumPRInfo);
+ prinfo_[num_prinfo_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+ }
+ if (rinfo.rmode() != RelocInfo::NONE) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+#ifdef DEBUG
+ if (!Serializer::enabled()) {
+ Serializer::TooLateToEnableNow();
+ }
+#endif
+ if (!Serializer::enabled() && !FLAG_debug_code) {
+ return;
+ }
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ reloc_info_writer.Write(&rinfo);
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Calculate the offset of the next check. It will be overwritten
+ // when a const pool is generated or when const pools are being
+ // blocked for a specific range.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+ // There is nothing to do if there are no pending relocation info entries
+ if (num_prinfo_ == 0) return;
+
+ // We emit a constant pool at regular intervals of about kDistBetweenPools
+ // or when requested by parameter force_emit (e.g. after each function).
+ // We prefer not to emit a jump unless the max distance is reached or if we
+ // are running low on slots, which can happen if a lot of constants are being
+ // emitted (e.g. --debug-code and many static references).
+ int dist = pc_offset() - last_const_pool_end_;
+ if (!force_emit && dist < kMaxDistBetweenPools &&
+ (require_jump || dist < kDistBetweenPools) &&
+ // TODO(1236125): Cleanup the "magic" number below. We know that
+ // the code generation will test every kCheckConstIntervalInst.
+ // Thus we are safe as long as we generate less than 7 constant
+ // entries per instruction.
+ (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+ return;
+ }
+
+ // If we did not return by now, we need to emit the constant pool soon.
+
+ // However, some small sequences of instructions must not be broken up by the
+ // insertion of a constant pool; such sequences are protected by setting
+ // no_const_pool_before_, which is checked here. Also, recursive calls to
+ // CheckConstPool are blocked by no_const_pool_before_.
+ if (pc_offset() < no_const_pool_before_) {
+ // Emission is currently blocked; make sure we try again as soon as possible
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Something is wrong if emission is forced and blocked at the same time
+ ASSERT(!force_emit);
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstrSize : 0;
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool and relocation information (include the jump over the pool and the
+ // constant pool marker).
+ int max_needed_space =
+ jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+ while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+ // Block recursive calls to CheckConstPool
+ BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+ num_prinfo_*kInstrSize);
+ // Don't bother to check for the emit calls below.
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Emit jump over constant pool if necessary
+ Label after_pool;
+ if (require_jump) b(&after_pool);
+
+ RecordComment("[ Constant Pool");
+
+ // Put down constant pool marker
+ // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ emit(0x03000000 | num_prinfo_);
+
+ // Emit constant pool entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+ Instr instr = instr_at(rinfo.pc());
+
+ // Instruction to patch must be a ldr/str [pc, #offset]
+ // P and U set, B and W clear, Rn == pc, offset12 still 0
+ ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+ (2*B25 | P | U | pc.code()*B16));
+ int delta = pc_ - rinfo.pc() - 8;
+ ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
+ if (delta < 0) {
+ instr &= ~U;
+ delta = -delta;
+ }
+ ASSERT(is_uint12(delta));
+ instr_at_put(rinfo.pc(), instr + delta);
+ emit(rinfo.data());
+ }
+ num_prinfo_ = 0;
+ last_const_pool_end_ = pc_offset();
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
+
+
+} } // namespace v8::internal
diff --git a/src/arm/assembler-thumb2.h b/src/arm/assembler-thumb2.h
new file mode 100644
index 0000000..31e9487
--- /dev/null
+++ b/src/arm/assembler-thumb2.h
@@ -0,0 +1,1027 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight ARM Assembler
+// Generates user mode instructions for the ARM architecture up to version 5
+
+#ifndef V8_ARM_ASSEMBLER_THUMB2_H_
+#define V8_ARM_ASSEMBLER_THUMB2_H_
+#include <stdio.h>
+#include "assembler.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// Core register
+struct Register {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern Register no_reg;
+extern Register r0;
+extern Register r1;
+extern Register r2;
+extern Register r3;
+extern Register r4;
+extern Register r5;
+extern Register r6;
+extern Register r7;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register fp;
+extern Register ip;
+extern Register sp;
+extern Register lr;
+extern Register pc;
+
+
+// Single word VFP register.
+struct SwVfpRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 32; }
+ bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister {
+ // Supporting d0 to d15, can be later extended to d31.
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ int code_;
+};
+
+
+// Support for VFP registers s0 to s31 (d0 to d15).
+// Note that "s(N):s(N+1)" is the same as "d(N/2)".
+extern SwVfpRegister s0;
+extern SwVfpRegister s1;
+extern SwVfpRegister s2;
+extern SwVfpRegister s3;
+extern SwVfpRegister s4;
+extern SwVfpRegister s5;
+extern SwVfpRegister s6;
+extern SwVfpRegister s7;
+extern SwVfpRegister s8;
+extern SwVfpRegister s9;
+extern SwVfpRegister s10;
+extern SwVfpRegister s11;
+extern SwVfpRegister s12;
+extern SwVfpRegister s13;
+extern SwVfpRegister s14;
+extern SwVfpRegister s15;
+extern SwVfpRegister s16;
+extern SwVfpRegister s17;
+extern SwVfpRegister s18;
+extern SwVfpRegister s19;
+extern SwVfpRegister s20;
+extern SwVfpRegister s21;
+extern SwVfpRegister s22;
+extern SwVfpRegister s23;
+extern SwVfpRegister s24;
+extern SwVfpRegister s25;
+extern SwVfpRegister s26;
+extern SwVfpRegister s27;
+extern SwVfpRegister s28;
+extern SwVfpRegister s29;
+extern SwVfpRegister s30;
+extern SwVfpRegister s31;
+
+extern DwVfpRegister d0;
+extern DwVfpRegister d1;
+extern DwVfpRegister d2;
+extern DwVfpRegister d3;
+extern DwVfpRegister d4;
+extern DwVfpRegister d5;
+extern DwVfpRegister d6;
+extern DwVfpRegister d7;
+extern DwVfpRegister d8;
+extern DwVfpRegister d9;
+extern DwVfpRegister d10;
+extern DwVfpRegister d11;
+extern DwVfpRegister d12;
+extern DwVfpRegister d13;
+extern DwVfpRegister d14;
+extern DwVfpRegister d15;
+
+
+// Coprocessor register
+struct CRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(CRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern CRegister no_creg;
+extern CRegister cr0;
+extern CRegister cr1;
+extern CRegister cr2;
+extern CRegister cr3;
+extern CRegister cr4;
+extern CRegister cr5;
+extern CRegister cr6;
+extern CRegister cr7;
+extern CRegister cr8;
+extern CRegister cr9;
+extern CRegister cr10;
+extern CRegister cr11;
+extern CRegister cr12;
+extern CRegister cr13;
+extern CRegister cr14;
+extern CRegister cr15;
+
+
+// Coprocessor number
+enum Coprocessor {
+ p0 = 0,
+ p1 = 1,
+ p2 = 2,
+ p3 = 3,
+ p4 = 4,
+ p5 = 5,
+ p6 = 6,
+ p7 = 7,
+ p8 = 8,
+ p9 = 9,
+ p10 = 10,
+ p11 = 11,
+ p12 = 12,
+ p13 = 13,
+ p14 = 14,
+ p15 = 15
+};
+
+
+// Condition field in instructions
+enum Condition {
+ eq = 0 << 28, // Z set equal.
+ ne = 1 << 28, // Z clear not equal.
+ nz = 1 << 28, // Z clear not zero.
+ cs = 2 << 28, // C set carry set.
+ hs = 2 << 28, // C set unsigned higher or same.
+ cc = 3 << 28, // C clear carry clear.
+ lo = 3 << 28, // C clear unsigned lower.
+ mi = 4 << 28, // N set negative.
+ pl = 5 << 28, // N clear positive or zero.
+ vs = 6 << 28, // V set overflow.
+ vc = 7 << 28, // V clear no overflow.
+ hi = 8 << 28, // C set, Z clear unsigned higher.
+ ls = 9 << 28, // C clear or Z set unsigned lower or same.
+ ge = 10 << 28, // N == V greater or equal.
+ lt = 11 << 28, // N != V less than.
+ gt = 12 << 28, // Z clear, N == V greater than.
+ le = 13 << 28, // Z set or N != V less than or equal.
+ al = 14 << 28 // always.
+};
+
+
+// Returns the equivalent of !cc.
+INLINE(Condition NegateCondition(Condition cc));
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cc;
+ }
+}
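+
+// A minimal usage sketch: ReverseCondition corresponds to swapping the
+// operands of a comparison, e.g. the branches in
+//   cmp(r0, Operand(r1));  b(lt, &target);  // taken if r0 < r1
+//   cmp(r1, Operand(r0));  b(gt, &target);  // same test, operands swapped
+// are equivalent, whereas NegateCondition inverts the test (lt becomes ge).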
+
+
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM. Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants
+
+// Shifter operand shift operation
+enum ShiftOp {
+ LSL = 0 << 5,
+ LSR = 1 << 5,
+ ASR = 2 << 5,
+ ROR = 3 << 5,
+ RRX = -1
+};
+
+
+// Condition code updating mode
+enum SBit {
+ SetCC = 1 << 20, // set condition code
+ LeaveCC = 0 << 20 // leave condition code unchanged
+};
+
+
+// Status register selection
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
+};
+
+
+// Status register fields
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values)
+typedef uint32_t SRegisterFieldMask;
+
+
+// Memory operand addressing mode
+enum AddrMode {
+ // bit encoding P U W
+ Offset = (8|4|0) << 21, // offset (without writeback to base)
+ PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
+ PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
+ NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
+ NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
+ NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
+};
+
+
+// Load/store multiple addressing mode
+enum BlockAddrMode {
+ // bit encoding P U W
+ da = (0|0|0) << 21, // decrement after
+ ia = (0|4|0) << 21, // increment after
+ db = (8|0|0) << 21, // decrement before
+ ib = (8|4|0) << 21, // increment before
+ da_w = (0|0|1) << 21, // decrement after with writeback to base
+ ia_w = (0|4|1) << 21, // increment after with writeback to base
+ db_w = (8|0|1) << 21, // decrement before with writeback to base
+ ib_w = (8|4|1) << 21 // increment before with writeback to base
+};
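+
+// A minimal usage sketch: with a full-descending stack, registers are
+// typically saved and restored with the writeback modes, e.g.
+//   stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());  // stmdb sp!, {r4, r5, lr}
+//   ldm(ia_w, sp, r4.bit() | r5.bit() | pc.bit());  // ldmia sp!, {r4, r5, pc}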
+
+
+// Coprocessor load/store operand size
+enum LFlag {
+ Long = 1 << 22, // long load/store coprocessor
+ Short = 0 << 22 // short load/store coprocessor
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+ // immediate
+ INLINE(explicit Operand(int32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE));
+ INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const char* s));
+ INLINE(explicit Operand(Object** opp));
+ INLINE(explicit Operand(Context** cpp));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // rm
+ INLINE(explicit Operand(Register rm));
+
+ // rm <shift_op> shift_imm
+ explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+
+ // rm <shift_op> rs
+ explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+
+ // Return true if this is a register operand.
+ INLINE(bool is_reg() const);
+
+ Register rm() const { return rm_; }
+
+ private:
+ Register rm_;
+ Register rs_;
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ int32_t imm32_; // valid if rm_ == no_reg
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+};
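+
+// A minimal usage sketch of the three operand forms, assuming an Assembler
+// is in scope:
+//   add(r0, r1, Operand(42));          // immediate: r0 = r1 + 42
+//   add(r0, r1, Operand(r2));          // register:  r0 = r1 + r2
+//   add(r0, r1, Operand(r2, LSL, 2));  // shifted:   r0 = r1 + (r2 << 2)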
+
+
+// Class MemOperand represents a memory operand in load and store instructions
+class MemOperand BASE_EMBEDDED {
+ public:
+ // [rn +/- offset] Offset/NegOffset
+ // [rn +/- offset]! PreIndex/NegPreIndex
+ // [rn], +/- offset PostIndex/NegPostIndex
+ // offset is any signed 32-bit value; offset is first loaded to register ip if
+ // it does not fit the addressing mode (12-bit unsigned and sign bit)
+ explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
+
+ // [rn +/- rm] Offset/NegOffset
+ // [rn +/- rm]! PreIndex/NegPreIndex
+ // [rn], +/- rm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
+
+ // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
+ // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
+ // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+
+ private:
+ Register rn_; // base
+ Register rm_; // register offset
+ int32_t offset_; // valid if rm_ == no_reg
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ AddrMode am_; // bits P, U, and W
+
+ friend class Assembler;
+};
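+
+// A minimal usage sketch of the addressing modes, assuming an Assembler
+// is in scope:
+//   ldr(r0, MemOperand(r1, 4));             // ldr r0, [r1, #4]
+//   ldr(r0, MemOperand(r1, 4, PreIndex));   // ldr r0, [r1, #4]!
+//   ldr(r0, MemOperand(r1, 4, PostIndex));  // ldr r0, [r1], #4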
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ return (enabled_ & (1u << f)) != 0;
+ }
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(CpuFeature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (found_by_runtime_probing_ & (1u << f)) == 0);
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= 1u << f;
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ unsigned old_enabled_;
+#else
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ private:
+ static unsigned supported_;
+ static unsigned enabled_;
+ static unsigned found_by_runtime_probing_;
+};
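+
+// A minimal usage sketch: feature-dependent code is guarded by a support
+// check and an enabling Scope, e.g.
+//   if (CpuFeatures::IsSupported(VFP3)) {
+//     CpuFeatures::Scope scope(VFP3);
+//     // ... emit VFP instructions here ...
+//   }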
+
+
+typedef int32_t Instr;
+
+
+extern const Instr kMovLrPc;
+extern const Instr kLdrPCPattern;
+
+
+class Assembler : public Malloced {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Returns the branch offset to the given label from the current code
+ // position. Links the label to the current position if it is still unbound.
+ // Manages the jump elimination optimization if the second parameter is true.
+ int branch_offset(Label* L, bool jump_elimination_allowed);
+
+ // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ INLINE(static Address target_address_address_at(Address pc));
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ INLINE(static Address target_address_at(Address pc));
+ INLINE(static void set_target_address_at(Address pc, Address target));
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches within generated code.
+ inline static void set_target_at(Address constant_pool_entry, Address target);
+
+ // This sets the branch destination (which is in the constant pool on ARM).
+ // This is for calls and branches to runtime code.
+ inline static void set_external_target_at(Address constant_pool_entry,
+ Address target) {
+ set_target_at(constant_pool_entry, target);
+ }
+
+ // Here we are patching the address in the constant pool, not the actual call
+ // instruction. The address in the constant pool is the same size as a
+ // pointer.
+ static const int kCallTargetSize = kPointerSize;
+ static const int kExternalTargetSize = kPointerSize;
+
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+ // Distance between the instruction referring to the address of the call
+ // target (ldr pc, [target addr in const pool]) and the return address
+ static const int kCallTargetAddressOffset = kInstrSize;
+
+ // Distance between start of patched return sequence and the emitted address
+ // to jump to.
+ static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static const int kPcLoadDelta = 8;
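+ // For example, an instruction at address A that reads the pc register
+ // observes A + kPcLoadDelta (A + 8), a consequence of the classic ARM
+ // three-stage pipeline.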
+
+ static const int kJSReturnSequenceLength = 4;
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ // Branch instructions
+ void b(int branch_offset, Condition cond = al);
+ void bl(int branch_offset, Condition cond = al);
+ void blx(int branch_offset); // v5 and above
+ void blx(Register target, Condition cond = al); // v5 and above
+ void bx(Register target, Condition cond = al); // v5 and above, plus v4t
+
+ // Convenience branch instructions using labels
+ void b(Label* L, Condition cond = al) {
+ b(branch_offset(L, cond == al), cond);
+ }
+ void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
+ void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
+ void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
+ void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
+
+ // Data-processing instructions
+ void and_(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void eor(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sub(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+ void sub(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ sub(dst, src1, Operand(src2), s, cond);
+ }
+
+ void rsb(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void add(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void adc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sbc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void rsc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void tst(Register src1, const Operand& src2, Condition cond = al);
+ void tst(Register src1, Register src2, Condition cond = al) {
+ tst(src1, Operand(src2), cond);
+ }
+
+ void teq(Register src1, const Operand& src2, Condition cond = al);
+
+ void cmp(Register src1, const Operand& src2, Condition cond = al);
+ void cmp(Register src1, Register src2, Condition cond = al) {
+ cmp(src1, Operand(src2), cond);
+ }
+
+ void cmn(Register src1, const Operand& src2, Condition cond = al);
+
+ void orr(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+ void orr(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ orr(dst, src1, Operand(src2), s, cond);
+ }
+
+ void mov(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+ void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
+ mov(dst, Operand(src), s, cond);
+ }
+
+ void bic(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mvn(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Multiply instructions
+
+ void mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mul(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Miscellaneous arithmetic instructions
+
+ void clz(Register dst, Register src, Condition cond = al); // v5 and above
+
+ // Status register access instructions
+
+ void mrs(Register dst, SRegister s, Condition cond = al);
+ void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
+
+ // Load/Store instructions
+ void ldr(Register dst, const MemOperand& src, Condition cond = al);
+ void str(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrb(Register dst, const MemOperand& src, Condition cond = al);
+ void strb(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrh(Register dst, const MemOperand& src, Condition cond = al);
+ void strh(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
+ void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
+
+ // Load/Store multiple instructions
+ void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
+ void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
+
+ // Semaphore instructions
+ void swp(Register dst, Register src, Register base, Condition cond = al);
+ void swpb(Register dst, Register src, Register base, Condition cond = al);
+
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg);
+
+ void bkpt(uint32_t imm16); // v5 and above
+ void swi(uint32_t imm24, Condition cond = al);
+
+ // Coprocessor instructions
+
+ void cdp(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2, Condition cond = al);
+
+ void cdp2(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2); // v5 and above
+
+ void mcr(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mcr2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void mrc(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mrc2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short, Condition cond = al);
+ void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short); // v5 and above
+ void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short, Condition cond = al);
+ void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short); // v5 and above
+ void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ // Support for VFP.
+ // These APIs support S0 to S31 and D0 to D15. The extended D registers
+ // (D16 to D31) are not currently supported, although simple modifications
+ // would allow these APIs to support them.
+
+ void vmov(const DwVfpRegister dst,
+ const Register src1,
+ const Register src2,
+ const Condition cond = al);
+ void vmov(const Register dst1,
+ const Register dst2,
+ const DwVfpRegister src,
+ const Condition cond = al);
+ void vmov(const SwVfpRegister dst,
+ const Register src,
+ const Condition cond = al);
+ void vmov(const Register dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const DwVfpRegister dst,
+ const SwVfpRegister src,
+ const Condition cond = al);
+ void vcvt(const SwVfpRegister dst,
+ const DwVfpRegister src,
+ const Condition cond = al);
+
+ void vadd(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vsub(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vmul(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vdiv(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vcmp(const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const SBit s = LeaveCC,
+ const Condition cond = al);
+ void vmrs(const Register dst,
+ const Condition cond = al);
+
+ // Pseudo instructions
+ void nop() { mov(r0, Operand(r0)); }
+
+ void push(Register src, Condition cond = al) {
+ str(src, MemOperand(sp, 4, NegPreIndex), cond);
+ }
+
+ void pop(Register dst, Condition cond = al) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), cond);
+ }
+
+ void pop() {
+ add(sp, sp, Operand(kPointerSize));
+ }
+
+ // Load effective address of memory operand x into register dst
+ void lea(Register dst, const MemOperand& x,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Jump unconditionally to given label.
+ void jmp(Label* L) { b(L, al); }
+
+ // Check the code size generated from label to here.
+ int InstructionsGeneratedSince(Label* l) {
+ return (pc_offset() - l->pos()) / kInstrSize;
+ }
+
+ // Check whether an immediate fits an addressing mode 1 instruction.
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+ // Debugging
+
+ // Mark address of the ExitJSFrame code.
+ void RecordJSReturn();
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+ void WriteRecordedPositions();
+
+ int pc_offset() const { return pc_ - buffer_; }
+ int current_position() const { return current_position_; }
+ int current_statement_position() const { return current_statement_position_; }
+
+ protected:
+ int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Read/patch instructions
+ static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
+ // Decode branch instruction at pos and return branch target pos
+ int target_at(int pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos
+ void target_at_put(int pos, int target_pos);
+
+ // Check if it is time to emit a constant pool for pending reloc info entries.
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Block the emission of the constant pool before pc_offset
+ void BlockConstPoolBefore(int pc_offset) {
+ if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+ }
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes
+ static const int kBufferCheckInterval = 1*KB/2;
+ int next_buffer_check_; // pc offset of next buffer check
+
+ // Code generation
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+ byte* pc_; // the program counter; moves forward
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+ // Constants in the pool may be addresses of functions that get relocated;
+ // if so, a relocation info entry is associated with the constant pool entry.
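+
+ // A sketch of the emitted layout when a pool is placed mid-sequence:
+ //     b skip            ; jump over the constant pool
+ //     .word <const 0>   ; entries reached via ldr rX, [pc, #offset]
+ //     .word <const 1>
+ //   skip: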
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+
+ // Pools are emitted after function return and in dead code at (more or less)
+ // regular intervals of kDistBetweenPools bytes
+ static const int kDistBetweenPools = 1*KB;
+
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant. We satisfy this constraint by limiting the
+ // distance between pools.
+ static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+
+ // Emission of the constant pool may be blocked in some code sequences
+ int no_const_pool_before_; // block emission before this pc offset
+
+ // Keep track of the last emitted pool to guarantee a maximal distance
+ int last_const_pool_end_; // pc offset following the last constant pool
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+ static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+ RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
+ int num_prinfo_; // number of pending reloc info entries in the buffer
+
+ // The bound position, before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // source position information
+ int current_position_;
+ int current_statement_position_;
+ int written_position_;
+ int written_statement_position_;
+
+ // Code emission
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+
+ // Instruction generation
+ void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
+ void addrmod2(Instr instr, Register rd, const MemOperand& x);
+ void addrmod3(Instr instr, Register rd, const MemOperand& x);
+ void addrmod4(Instr instr, Register rn, RegList rl);
+ void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+
+ // Labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+ void next(Label* L);
+
+ // Record reloc info for current pc_
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ friend class RegExpMacroAssemblerARM;
+ friend class RelocInfo;
+ friend class CodePatcher;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_ASSEMBLER_THUMB2_H_
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 5389a3c..ae7dae3 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -38,15 +38,32 @@
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
- // TODO(428): Don't pass the function in a static variable.
- __ mov(ip, Operand(ExternalReference::builtin_passed_function()));
- __ str(r1, MemOperand(ip, 0));
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments excluding receiver
+ // -- r1 : called function (only guaranteed when
+ // extra_args requires it)
+ // -- cp : context
+ // -- sp[0] : last argument
+ // -- ...
+ // -- sp[4 * (argc - 1)] : first argument (argc == r0)
+ // -- sp[4 * argc] : receiver
+ // -----------------------------------
- // The actual argument count has already been loaded into register
- // r0, but JumpToRuntime expects r0 to contain the number of
- // arguments including the receiver.
- __ add(r0, r0, Operand(1));
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ push(r1);
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToRuntime expects r0 to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ add(r0, r0, Operand(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id));
}
@@ -491,7 +508,8 @@
}
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -727,8 +745,17 @@
// Call the function.
// r0: number of arguments
// r1: constructor function
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ if (is_api_function) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::HandleApiCallConstruct));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ }
// Pop the function from the stack.
// sp[0]: constructor function
@@ -783,6 +810,16 @@
}
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 749f32d..17e18d9 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -67,16 +67,6 @@
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- GenerateFastMathOp(SIN, args);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- GenerateFastMathOp(COS, args);
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7c0b0c6..0c1dbcc 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -44,9 +44,10 @@
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc);
+ Condition cc,
+ bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Label* rhs_not_nan,
+ Label* lhs_not_nan,
Label* slow,
bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
@@ -186,12 +187,18 @@
function_return_is_shadowed_ = false;
VirtualFrame::SpilledScope spilled_scope;
- if (scope_->num_heap_slots() > 0) {
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
#ifdef DEBUG
JumpTarget verified_true;
@@ -240,28 +247,35 @@
// initialization because the arguments object may be stored in the
// context.
if (scope_->arguments() != NULL) {
- ASSERT(scope_->arguments_shadow() != NULL);
Comment cmnt(masm_, "[ allocate arguments object");
- { Reference shadow_ref(this, scope_->arguments_shadow());
- { Reference arguments_ref(this, scope_->arguments());
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address,
- // and the frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope_->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
- frame_->Adjust(3);
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- arguments_ref.SetValue(NOT_CONST_INIT);
- }
- shadow_ref.SetValue(NOT_CONST_INIT);
- }
+ ASSERT(scope_->arguments_shadow() != NULL);
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ ldr(r2, frame_->Function());
+ // The receiver is below the arguments, the return address, and the
+ // frame pointer on the stack.
+ const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
frame_->Drop(); // Value is no longer needed.
}
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ frame_->EmitPush(ip);
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -613,15 +627,7 @@
// The expression is either a property or a variable proxy that rewrites
// to a property.
LoadAndSpill(property->obj());
- // We use a named reference if the key is a literal symbol, unless it is
- // a string that can be legally parsed as an integer. This is because
- // otherwise we will not get into the slow case code that handles [] on
- // String objects.
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
LoadAndSpill(property->key());
@@ -1085,7 +1091,8 @@
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
- int position) {
+ CallFunctionFlags flags,
+ int position) {
VirtualFrame::SpilledScope spilled_scope;
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
@@ -1098,7 +1105,7 @@
// Use the shared code stub to call the function.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, flags);
frame_->CallStub(&call_function, arg_count + 1);
// Restore context and pop function from the stack.
@@ -1986,13 +1993,9 @@
frame_->EmitPush(r0);
// Store the caught exception in the catch variable.
- { Reference ref(this, node->catch_var());
- ASSERT(ref.is_slot());
- // Here we make use of the convenient property that it doesn't matter
- // whether a value is immediately on top of or underneath a zero-sized
- // reference.
- ref.SetValue(NOT_CONST_INIT);
- }
+ Variable* catch_var = node->catch_var()->var();
+ ASSERT(catch_var != NULL && catch_var->slot() != NULL);
+ StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -2298,12 +2301,21 @@
VirtualFrame::SpilledScope spilled_scope;
ASSERT(boilerplate->IsBoilerplate());
- // Create a new closure.
- frame_->EmitPush(cp);
__ mov(r0, Operand(boilerplate));
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->EmitPush(r0);
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+ FastNewClosureStub stub;
+ frame_->EmitPush(r0);
+ frame_->CallStub(&stub, 1);
+ frame_->EmitPush(r0);
+ } else {
+ // Create a new closure.
+ frame_->EmitPush(cp);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->EmitPush(r0);
+ }
}
@@ -2444,6 +2456,87 @@
}
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call.
+ frame_->EmitPush(cp);
+ __ mov(r0, Operand(slot->var()->name()));
+ frame_->EmitPush(r0);
+
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize
+ // const properties (introduced via eval("const foo = (some
+ // expr);")). Also, uses the current function context instead of
+ // the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the
+ // same time, because the const declaration may be at the end of
+ // the eval code (sigh...) and the const variable may have been
+ // used before (where its value is 'undefined'). Thus, we can only
+ // do the initialization when we actually encounter the expression
+ // and when the expression operands are defined and valid, and
+ // thus we need the split into 2 operations: declaration of the
+ // context slot followed by initialization.
+ frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling assignment expressions.
+ frame_->EmitPush(r0);
+
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is
+ // executed, the code is identical to a normal store (see below).
+ Comment cmnt(masm_, "[ Init const");
+ __ ldr(r2, SlotOperand(slot, r2));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, ip);
+ exit.Branch(ne);
+ }
+
+ // We must execute the store. Storing a variable must keep the
+ // (new) value on the stack. This is necessary for compiling
+ // assignment expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end up
+ // calling this code. r2 may be loaded with context; used below in
+ // RecordWrite.
+ frame_->EmitPop(r0);
+ __ str(r0, SlotOperand(slot, r2));
+ frame_->EmitPush(r0);
+ if (slot->type() == Slot::CONTEXT) {
+ // Skip write barrier if the written value is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ exit.Branch(eq);
+ // r2 is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, r1);
+ }
+ // If we definitely did not jump over the assignment, we do not need
+ // to bind the exit label. Doing so can defeat peephole
+ // optimization.
+ if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+ exit.Bind();
+ }
+ }
+}
+
+
void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
@@ -2601,42 +2694,6 @@
}
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateObjectLiteralBoilerplate.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
-class DeferredObjectLiteral: public DeferredCode {
- public:
- explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
- set_comment("[ DeferredObjectLiteral");
- }
-
- virtual void Generate();
-
- private:
- ObjectLiteral* node_;
-};
-
-
-void DeferredObjectLiteral::Generate() {
- // Argument is passed in r1.
-
- // If the entry is undefined we call the runtime system to compute
- // the literal.
- // Literal array (0).
- __ push(r1);
- // Literal index (1).
- __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- __ push(r0);
- // Constant properties (2).
- __ mov(r0, Operand(node_->constant_properties()));
- __ push(r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ mov(r2, Operand(r0));
- // Result is returned in r2.
-}
-
-
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -2644,39 +2701,22 @@
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ObjectLiteral");
- DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
-
- // Retrieve the literal array and check the allocated entry.
-
// Load the function of this activation.
- __ ldr(r1, frame_->Function());
-
- // Load the literals array of the function.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(r2, FieldMemOperand(r1, literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, Operand(ip));
- deferred->Branch(eq);
- deferred->BindExit();
-
- // Push the object literal boilerplate.
- frame_->EmitPush(r2);
-
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ __ ldr(r2, frame_->Function());
+ // Literal array.
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ // Literal index.
+ __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
+ // Constant properties.
+ __ mov(r0, Operand(node->constant_properties()));
+ frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ if (node->depth() > 1) {
+ frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- frame_->CallRuntime(clone_function_id, 1);
frame_->EmitPush(r0); // save the result
- // r0: cloned object literal
+ // r0: created object literal
for (int i = 0; i < node->properties()->length(); i++) {
ObjectLiteral::Property* property = node->properties()->at(i);
@@ -2724,42 +2764,6 @@
}
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateArrayLiteralBoilerplate.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
-class DeferredArrayLiteral: public DeferredCode {
- public:
- explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
- set_comment("[ DeferredArrayLiteral");
- }
-
- virtual void Generate();
-
- private:
- ArrayLiteral* node_;
-};
-
-
-void DeferredArrayLiteral::Generate() {
- // Argument is passed in r1.
-
- // If the entry is undefined we call the runtime system to computed
- // the literal.
- // Literal array (0).
- __ push(r1);
- // Literal index (1).
- __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- __ push(r0);
- // Constant properties (2).
- __ mov(r0, Operand(node_->literals()));
- __ push(r0);
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- __ mov(r2, Operand(r0));
- // Result is returned in r2.
-}
-
-
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -2767,39 +2771,22 @@
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ArrayLiteral");
- DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
-
- // Retrieve the literal array and check the allocated entry.
-
// Load the function of this activation.
- __ ldr(r1, frame_->Function());
-
- // Load the literals array of the function.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(r2, FieldMemOperand(r1, literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, Operand(ip));
- deferred->Branch(eq);
- deferred->BindExit();
-
- // Push the object literal boilerplate.
- frame_->EmitPush(r2);
-
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ __ ldr(r2, frame_->Function());
+ // Literals array.
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ // Literal index.
+ __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
+ // Constant elements.
+ __ mov(r0, Operand(node->constant_elements()));
+ frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ if (node->depth() > 1) {
+ frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
- frame_->CallRuntime(clone_function_id, 1);
frame_->EmitPush(r0); // save the result
- // r0: cloned object literal
+ // r0: created object literal
// Generate code to set the elements in the array that are not
// literals.
@@ -2998,20 +2985,22 @@
frame_->EmitPush(r2);
}
+ // Push the receiver.
+ __ ldr(r1, frame_->Receiver());
+ frame_->EmitPush(r1);
+
// Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
// Touch up stack with the right values for the function and the receiver.
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
- __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+ __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ str(r1, MemOperand(sp, arg_count * kPointerSize));
// Call the function.
CodeForSourcePosition(node->position());
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
frame_->CallStub(&call_function, arg_count + 1);
__ ldr(cp, frame_->Context());
@@ -3068,7 +3057,7 @@
frame_->EmitPush(r1); // receiver
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
} else if (property != NULL) {
@@ -3121,7 +3110,7 @@
}
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
frame_->EmitPush(r0);
}
@@ -3137,7 +3126,7 @@
LoadGlobalReceiver(r0);
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
frame_->EmitPush(r0);
}
ASSERT(frame_->height() == original_height + 1);
@@ -3544,21 +3533,6 @@
}
-void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
- LoadAndSpill(args->at(0));
- switch (op) {
- case SIN:
- frame_->CallRuntime(Runtime::kMath_sin, 1);
- break;
- case COS:
- frame_->CallRuntime(Runtime::kMath_cos, 1);
- break;
- }
- frame_->EmitPush(r0);
-}
-
-
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
@@ -3570,6 +3544,42 @@
}
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ frame_->CallRuntime(Runtime::kSubString, 3);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ frame_->CallRuntime(Runtime::kStringCompare, 2);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(4, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+
+ frame_->CallRuntime(Runtime::kRegExpExec, 4);
+ frame_->EmitPush(r0);
+}
+
+
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
@@ -3713,7 +3723,7 @@
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnarySubStub stub(overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
}
@@ -4343,83 +4353,7 @@
case SLOT: {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call.
- frame->EmitPush(cp);
- __ mov(r0, Operand(slot->var()->name()));
- frame->EmitPush(r0);
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize
- // const properties (introduced via eval("const foo = (some
- // expr);")). Also, uses the current function context instead of
- // the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the
- // same time, because the const declaration may be at the end of
- // the eval code (sigh...) and the const variable may have been
- // used before (where its value is 'undefined'). Thus, we can only
- // do the initialization when we actually encounter the expression
- // and when the expression operands are defined and valid, and
- // thus we need the split into 2 operations: declaration of the
- // context slot followed by initialization.
- frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame->CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- frame->EmitPush(r0);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is
- // executed, the code is identical to a normal store (see below).
- Comment cmnt(masm, "[ Init const");
- __ ldr(r2, cgen_->SlotOperand(slot, r2));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- exit.Branch(ne);
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. r2 may be loaded with context; used below in
- // RecordWrite.
- frame->EmitPop(r0);
- __ str(r0, cgen_->SlotOperand(slot, r2));
- frame->EmitPush(r0);
- if (slot->type() == Slot::CONTEXT) {
- // Skip write barrier if the written value is a smi.
- __ tst(r0, Operand(kSmiTagMask));
- exit.Branch(eq);
- // r2 is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(offset));
- __ RecordWrite(r2, r3, r1);
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
- }
+ cgen_->StoreToSlot(slot, init_state);
break;
}
@@ -4466,6 +4400,103 @@
}
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Clone the boilerplate in new space. Set the context to the
+ // current context in cp.
+ Label gc;
+
+ // Pop the boilerplate function from the stack.
+ __ pop(r3);
+
+ // Attempt to allocate new JSFunction in new space.
+ __ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+ // Clone the rest of the boilerplate fields. We don't have to update
+ // the write barrier because the allocated object is in new space.
+ for (int offset = kPointerSize;
+ offset < JSFunction::kSize;
+ offset += kPointerSize) {
+ if (offset == JSFunction::kContextOffset) {
+ __ str(cp, FieldMemOperand(r0, offset));
+ } else {
+ __ ldr(r1, FieldMemOperand(r3, offset));
+ __ str(r1, FieldMemOperand(r0, offset));
+ }
+ }
+
+ // Return result. The argument boilerplate has been popped already.
+ __ Ret();
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ push(cp);
+ __ push(r3);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+ // Attempt to allocate the context in new space.
+ __ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(r2, Heap::kContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(length));
+ __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
+
+ // Setup the fixed slots.
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+ // Copy the global object from the surrounding context.
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, r0);
+ __ pop();
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+}
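
The Context::SlotOffset calls above are plain FixedArray arithmetic: a two-word header (map and length) followed by pointer-sized slots. A small sketch of that layout math, assuming 32-bit ARM sizes:

    #include <cstdio>

    const int kPointerSize = 4;                          // 32-bit ARM
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length words

    int SlotOffset(int index) {
      return kFixedArrayHeaderSize + index * kPointerSize;
    }

    int main() {
      for (int i = 0; i < 4; i++) {
        std::printf("slot %d -> byte offset %d\n", i, SlotOffset(i));
      }
    }
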
+
+
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
// (31 instead of 32).
@@ -4692,94 +4723,105 @@
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc) {
+ Condition cc,
+ bool never_nan_nan) {
Label not_identical;
+ Label heap_number, return_equal;
+ Register exp_mask_reg = r5;
__ cmp(r0, Operand(r1));
  __ b(ne, &not_identical);
- Register exp_mask_reg = r5;
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- Label heap_number, return_equal;
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == lt || cc == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == lt || cc == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
__ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but (undefined <= undefined)
- // == false! See ECMAScript 11.8.5.
- if (cc == le || cc == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, Operand(r2));
- __ b(ne, &return_equal);
- if (cc == le) {
- __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
- } else {
- __ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(r2));
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
+ }
+ __ mov(pc, Operand(lr)); // Return.
}
- __ mov(pc, Operand(lr)); // Return.
}
}
}
+
__ bind(&return_equal);
if (cc == lt) {
__ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == gt) {
__ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
} else {
- __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves.
+ __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
}
__ mov(pc, Operand(lr)); // Return.
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ and_(r3, r2, Operand(exp_mask_reg));
- __ cmp(r3, Operand(exp_mask_reg));
- __ b(ne, &return_equal);
+ if (cc != eq || !never_nan_nan) {
+    // For less and greater we don't have to check for NaN since the result
+    // of x < x is false regardless. For the other conditions we need to
+    // check for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
- // (it's a NaN). For <= and >= we need to load r0 with the failing value
- // if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
- if (cc == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ and_(r3, r2, Operand(exp_mask_reg));
+ __ cmp(r3, Operand(exp_mask_reg));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ }
}
+ __ mov(pc, Operand(lr)); // Return.
}
- __ mov(pc, Operand(lr)); // Return.
+ // No fall through here.
}
- // No fall through here.
  __ bind(&not_identical);
}
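
The heap-number branch above identifies NaN purely from the bit pattern: all exponent bits set and a non-zero mantissa. The same test in portable C++:

    // A double is NaN iff the eleven exponent bits (52..62) are all set
    // and the 52-bit mantissa is non-zero; an all-zero mantissa with a
    // saturated exponent is Infinity. Mirrors the exp_mask_reg check.
    #include <cstdint>
    #include <cstring>

    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint64_t exponent = (bits >> 52) & 0x7FF;
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      return exponent == 0x7FF && mantissa != 0;
    }
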
@@ -4787,7 +4829,7 @@
// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
- Label* rhs_not_nan,
+ Label* lhs_not_nan,
Label* slow,
bool strict) {
Label lhs_is_smi;
@@ -4825,7 +4867,7 @@
// We now have both loaded as doubles but we can skip the lhs nan check
// since it's a Smi.
__ pop(lr);
- __ jmp(rhs_not_nan);
+ __ jmp(lhs_not_nan);
__ bind(&lhs_is_smi);
// Lhs is a Smi. Check whether the non-smi is a heap number.
@@ -4861,37 +4903,39 @@
}
-void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
+void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register lhs_exponent = exp_first ? r0 : r1;
- Register rhs_exponent = exp_first ? r2 : r3;
- Register lhs_mantissa = exp_first ? r1 : r0;
- Register rhs_mantissa = exp_first ? r3 : r2;
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
+ Label lhs_not_nan_exp_mask_is_loaded;
Register exp_mask_reg = r5;
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
- __ cmp(r4, Operand(exp_mask_reg));
- __ b(ne, rhs_not_nan);
- __ mov(r4,
- Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
- SetCC);
- __ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0));
- __ b(ne, &one_is_nan);
-
- __ bind(rhs_not_nan);
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
__ and_(r4, lhs_exponent, Operand(exp_mask_reg));
__ cmp(r4, Operand(exp_mask_reg));
- __ b(ne, &neither_is_nan);
+ __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
__ mov(r4,
Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
__ cmp(lhs_mantissa, Operand(0));
+ __ b(ne, &one_is_nan);
+
+ __ bind(lhs_not_nan);
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+ __ bind(&lhs_not_nan_exp_mask_is_loaded);
+ __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
+ __ cmp(r4, Operand(exp_mask_reg));
+ __ b(ne, &neither_is_nan);
+ __ mov(r4,
+ Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(rhs_mantissa, Operand(0));
__ b(eq, &neither_is_nan);
__ bind(&one_is_nan);
@@ -4911,21 +4955,21 @@
// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register lhs_exponent = exp_first ? r0 : r1;
- Register rhs_exponent = exp_first ? r2 : r3;
- Register lhs_mantissa = exp_first ? r1 : r0;
- Register rhs_mantissa = exp_first ? r3 : r2;
+ Register rhs_exponent = exp_first ? r0 : r1;
+ Register lhs_exponent = exp_first ? r2 : r3;
+ Register rhs_mantissa = exp_first ? r1 : r0;
+ Register lhs_mantissa = exp_first ? r3 : r2;
// r0, r1, r2, r3 have the two doubles. Neither is a NaN.
if (cc == eq) {
// Doubles are not equal unless they have the same bit pattern.
// Exception: 0 and -0.
- __ cmp(lhs_mantissa, Operand(rhs_mantissa));
- __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
+ __ cmp(rhs_mantissa, Operand(lhs_mantissa));
+ __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
// Return non-zero if the numbers are unequal.
__ mov(pc, Operand(lr), LeaveCC, ne);
- __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
+ __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
// If exponents are equal then return 0.
__ mov(pc, Operand(lr), LeaveCC, eq);
@@ -4935,12 +4979,12 @@
// We start by seeing if the mantissas (that are equal) or the bottom
// 31 bits of the rhs exponent are non-zero. If so we return not
// equal.
- __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
+ __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
__ mov(r0, Operand(r4), LeaveCC, ne);
__ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
// Now they are equal if and only if the lhs exponent is zero in its
// low 31 bits.
- __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
+ __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
__ mov(pc, Operand(lr));
} else {
// Call a native function to do a comparison between two non-NaNs.
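
The zero-vs-negative-zero handling in the eq path above exists because the two values differ only in the sign bit yet must compare equal. A short demonstration:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double pz = 0.0, nz = -0.0;
      uint64_t pb, nb;
      std::memcpy(&pb, &pz, sizeof pb);
      std::memcpy(&nb, &nz, sizeof nb);
      assert(pb != nb);  // bit patterns differ (sign bit)
      assert(pz == nz);  // yet IEEE 754 and ECMAScript say they are equal
    }
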
@@ -4979,6 +5023,14 @@
// Check for oddballs: true, false, null, undefined.
__ cmp(r3, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
+
+ // Now that we have the types we might as well check for symbol-symbol.
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
+ __ and_(r2, r2, Operand(r3));
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(ne, &return_not_equal);
}
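
The new symbol-symbol shortcut works by ANDing the two instance types and testing the symbol bit, which is only sound because the ASSERTs guarantee no non-string type carries that bit. A sketch with invented mask values, not the real V8 encoding:

    #include <cassert>

    const int kIsSymbolMask = 0x20;  // assumption for the sketch

    bool BothSymbols(int type_a, int type_b) {
      // If either type lacked the bit, the AND clears it.
      return ((type_a & type_b) & kIsSymbolMask) != 0;
    }

    int main() {
      int kSymbolStringType = 0x21;  // string + symbol bit (hypothetical)
      int kPlainStringType = 0x01;   // string, not a symbol (hypothetical)
      assert(BothSymbols(kSymbolStringType, kSymbolStringType));
      assert(!BothSymbols(kSymbolStringType, kPlainStringType));
    }
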
@@ -4987,9 +5039,10 @@
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+ __ CompareObjectType(r0, r3, r2, HEAP_NUMBER_TYPE);
__ b(ne, not_heap_numbers);
- __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r2, r3);
__ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
// Both are heap numbers. Load them up then jump to the code we have
@@ -5005,12 +5058,13 @@
// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
// r2 is object type of r0.
- __ tst(r2, Operand(kIsNotStringMask));
- __ b(ne, slow);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
__ tst(r2, Operand(kIsSymbolMask));
__ b(eq, slow);
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, slow);
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, slow);
@@ -5025,14 +5079,14 @@
// positive or negative to indicate the result of the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles, rhs_not_nan;
+ Label not_smis, both_loaded_as_doubles, lhs_not_nan;
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_);
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -5045,32 +5099,46 @@
// 1) Return the answer.
// 2) Go to slow.
// 3) Fall through to both_loaded_as_doubles.
- // 4) Jump to rhs_not_nan.
+ // 4) Jump to lhs_not_nan.
// In cases 3 and 4 we have found out we were dealing with a number-number
// comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
- EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
- // r0, r1, r2, r3 are the double representations of the left hand side
- // and the right hand side.
-
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, &rhs_not_nan, cc_);
+ // r0, r1, r2, r3 are the double representations of the right hand side
+ // and the left hand side.
if (CpuFeatures::IsSupported(VFP3)) {
+ __ bind(&lhs_not_nan);
CpuFeatures::Scope scope(VFP3);
+ Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
- __ fmdrr(d6, r0, r1);
- __ fmdrr(d7, r2, r3);
+ __ vmov(d6, r0, r1);
+ __ vmov(d7, r2, r3);
- __ fcmp(d6, d7);
- __ vmrs(pc);
- __ mov(r0, Operand(0), LeaveCC, eq);
- __ mov(r0, Operand(1), LeaveCC, lt);
- __ mvn(r0, Operand(0), LeaveCC, gt);
+ __ vcmp(d7, d6);
+ __ vmrs(pc); // Move vector status bits to normal status bits.
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ mov(pc, Operand(lr));
+
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc_ == lt || cc_ == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
__ mov(pc, Operand(lr));
} else {
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds lhs_not_nan.
+ EmitNanCheck(masm, &lhs_not_nan, cc_);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
EmitTwoNonNanDoubleComparison(masm, cc_);
@@ -5089,26 +5157,26 @@
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
// that case. If the inputs are not doubles then jumps to check_for_symbols.
- // In this case r2 will contain the type of r0.
+ // In this case r2 will contain the type of r0. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
&both_loaded_as_doubles,
&check_for_symbols,
&slow);
__ bind(&check_for_symbols);
- if (cc_ == eq) {
+  // In the strict case EmitStrictTwoHeapObjectCompare has already taken
+  // care of symbols.
+ if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
EmitCheckForSymbols(masm, &slow);
}
__ bind(&slow);
- __ push(lr);
__ push(r1);
__ push(r0);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- int arg_count = 1; // Not counting receiver.
if (cc_ == eq) {
native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
@@ -5120,16 +5188,13 @@
ASSERT(cc_ == gt || cc_ == ge); // remaining cases
ncr = LESS;
}
- arg_count++;
__ mov(r0, Operand(Smi::FromInt(ncr)));
__ push(r0);
}
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, CALL_JS);
- __ cmp(r0, Operand(0));
- __ pop(pc);
+ __ InvokeBuiltin(native, JUMP_JS);
}
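
The VFP path above encodes the comparison contract used throughout CompareStub: return LESS, EQUAL, or GREATER, and on NaN return whichever value makes the pending condition fail. The same contract as a plain C++ function:

    enum Result { LESS = -1, EQUAL = 0, GREATER = 1 };
    enum Cond { kLT, kLE, kGT, kGE, kEQ };

    Result Compare(double lhs, double rhs, Cond cc) {
      if (lhs != lhs || rhs != rhs) {  // NaN on either side
        // Make < and <= fail with GREATER, > and >= (and ==) with LESS.
        return (cc == kLT || cc == kLE) ? GREATER : LESS;
      }
      if (lhs < rhs) return LESS;
      if (lhs > rhs) return GREATER;
      return EQUAL;
    }
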
@@ -5331,22 +5396,22 @@
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision, add, subtract, multiply, divide.
- __ fmdrr(d6, r0, r1);
- __ fmdrr(d7, r2, r3);
+ __ vmov(d6, r0, r1);
+ __ vmov(d7, r2, r3);
if (Token::MUL == operation) {
- __ fmuld(d5, d6, d7);
+ __ vmul(d5, d6, d7);
} else if (Token::DIV == operation) {
- __ fdivd(d5, d6, d7);
+ __ vdiv(d5, d6, d7);
} else if (Token::ADD == operation) {
- __ faddd(d5, d6, d7);
+ __ vadd(d5, d6, d7);
} else if (Token::SUB == operation) {
- __ fsubd(d5, d6, d7);
+ __ vsub(d5, d6, d7);
} else {
UNREACHABLE();
}
- __ fmrrd(r0, r1, d5);
+ __ vmov(r0, r1, d5);
__ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
@@ -5435,9 +5500,9 @@
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round to zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- __ fmdrr(d7, scratch2, scratch);
- __ ftosid(s15, d7);
- __ fmrs(dest, s15);
+ __ vmov(d7, scratch2, scratch);
+ __ vcvt(s15, d7);
+ __ vmov(dest, s15);
} else {
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
@@ -5680,6 +5745,29 @@
}
+const char* GenericBinaryOpStub::GetName() {
+ if (name_ != NULL) return name_;
+ const int len = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+
+ OS::SNPrintF(Vector<char>(name_, len),
+ "GenericBinaryOpStub_%s_%s%s",
+ op_name,
+ overwrite_name,
+               specialized_on_rhs_ ? "_ConstantRhs" : "");
+ return name_;
+}
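
GetName above builds the stub name lazily into a heap buffer; the formatting itself is ordinary printf-style composition. A stand-alone equivalent using plain snprintf in place of OS::SNPrintF and the auto-deleted array:

    #include <cstdio>

    const char* StubName(char* buffer, int len, const char* op,
                         const char* overwrite, bool constant_rhs) {
      std::snprintf(buffer, len, "GenericBinaryOpStub_%s_%s%s",
                    op, overwrite, constant_rhs ? "_ConstantRhs" : "");
      return buffer;
    }

    // Usage: char buf[100]; StubName(buf, sizeof buf, "ADD", "Alloc", true);
    // yields "GenericBinaryOpStub_ADD_Alloc_ConstantRhs".
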
+
+
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@@ -5932,7 +6020,9 @@
}
-void UnarySubStub::Generate(MacroAssembler* masm) {
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ ASSERT(op_ == Token::SUB);
+
Label undo;
Label slow;
Label not_smi;
@@ -6530,6 +6620,33 @@
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
+
+ // If the receiver might be a value (string, number or boolean) check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // function, receiver [, arguments]
+ Label receiver_is_value, receiver_is_js_object;
+ __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ BranchOnSmi(r1, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(r1);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ LeaveInternalFrame();
+ __ str(r0, MemOperand(sp, argc_ * kPointerSize));
+
+ __ bind(&receiver_is_js_object);
+ }
+
// Get the function to call from the stack.
// function, receiver [, arguments]
__ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
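
The new boxing prologue treats smis and any heap object below FIRST_JS_OBJECT_TYPE as primitive values that need wrapping via Builtins::TO_OBJECT. The decision, modeled in C++ with an invented Value struct and an illustrative type threshold:

    struct Value {
      bool is_smi;
      int instance_type;  // meaningful only when !is_smi
    };

    const int FIRST_JS_OBJECT_TYPE = 0x80;  // illustrative threshold

    bool NeedsBoxing(const Value& receiver) {
      if (receiver.is_smi) return true;                      // a number value
      return receiver.instance_type < FIRST_JS_OBJECT_TYPE;  // string/bool/...
    }
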
@@ -6556,10 +6673,53 @@
}
+const char* CompareStub::GetName() {
+ switch (cc_) {
+ case lt: return "CompareStub_LT";
+ case gt: return "CompareStub_GT";
+ case le: return "CompareStub_LE";
+ case ge: return "CompareStub_GE";
+ case ne: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_NE_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_NO_NAN";
+ } else {
+ return "CompareStub_NE";
+ }
+ }
+ }
+ case eq: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_EQ_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_NO_NAN";
+ } else {
+ return "CompareStub_EQ";
+ }
+ }
+ }
+ default: return "CompareStub";
+ }
+}
+
+
int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
- return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+ // Encode the three parameters in a unique 16 bit value.
+ ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
+ int nnn_value = (never_nan_nan_ ? 2 : 0);
+ if (cc_ != eq) nnn_value = 0; // Avoid duplicate stubs.
+ return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
}
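
MinorKey packs strict_ into bit 0, never_nan_nan_ into bit 1 (kept only for eq so other conditions don't mint duplicate stubs), and the ARM condition field, which occupies the top four instruction bits, shifted down above them. As a plain function:

    // cc is the raw ARM condition encoding (bits 28..31 of an instruction),
    // so cc >> 26 lands it in bits 2..5, clear of the two flag bits.
    unsigned MinorKey(unsigned cc, bool strict, bool never_nan_nan,
                      bool is_eq) {
      unsigned nnn = (is_eq && never_nan_nan) ? 2u : 0u;
      return (cc >> 26) | nnn | (strict ? 1u : 0u);
    }
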
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index ba7f936..f5de0eb 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -272,6 +272,9 @@
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ // Store the value on top of the stack to a slot.
+ void StoreToSlot(Slot* slot, InitState init_state);
+
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
@@ -301,7 +304,9 @@
bool reversed,
OverwriteMode mode);
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
// Control flow
void Branch(bool if_true, JumpTarget* target);
@@ -360,15 +365,18 @@
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
- // Fast support for Math.sin and Math.cos.
- enum MathOp { SIN, COS };
- void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
- inline void GenerateMathSin(ZoneList<Expression*>* args);
- inline void GenerateMathCos(ZoneList<Expression*>* args);
-
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
+ // Fast support for SubString.
+ void GenerateSubString(ZoneList<Expression*>* args);
+
+ // Fast support for StringCompare.
+ void GenerateStringCompare(ZoneList<Expression*>* args);
+
+ // Support for direct calls from JavaScript to native RegExp code.
+ void GenerateRegExpExec(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -426,27 +434,6 @@
};
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) {}
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#if defined(DEBUG)
- void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
-#endif // defined(DEBUG)
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
@@ -455,13 +442,15 @@
: op_(op),
mode_(mode),
constant_rhs_(constant_rhs),
- specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+ name_(NULL) { }
private:
Token::Value op_;
OverwriteMode mode_;
int constant_rhs_;
bool specialized_on_rhs_;
+ char* name_;
static const int kMaxKnownRhs = 0x40000000;
@@ -506,22 +495,7 @@
return key;
}
- const char* GetName() {
- switch (op_) {
- case Token::ADD: return "GenericBinaryOpStub_ADD";
- case Token::SUB: return "GenericBinaryOpStub_SUB";
- case Token::MUL: return "GenericBinaryOpStub_MUL";
- case Token::DIV: return "GenericBinaryOpStub_DIV";
- case Token::MOD: return "GenericBinaryOpStub_MOD";
- case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
- case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
- case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
- case Token::SAR: return "GenericBinaryOpStub_SAR";
- case Token::SHL: return "GenericBinaryOpStub_SHL";
- case Token::SHR: return "GenericBinaryOpStub_SHR";
- default: return "GenericBinaryOpStub";
- }
- }
+ const char* GetName();
#ifdef DEBUG
void Print() {
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index a5a358b..4e39cda 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -61,28 +61,32 @@
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
#ifdef __ARM_EABI__
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
#if defined (__arm__) && !defined(__thumb__)
// __arm__ may be defined in thumb mode.
+ register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
asm volatile(
"swi 0x0"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
+ // r7 is reserved by the EABI in thumb mode.
asm volatile(
"@ Enter ARM Mode \n\t"
"adr r3, 1f \n\t"
"bx r3 \n\t"
".ALIGN 4 \n\t"
".ARM \n"
- "1: swi 0x0 \n\t"
+ "1: push {r7} \n\t"
+ "mov r7, %4 \n\t"
+ "swi 0x0 \n\t"
+ "pop {r7} \n\t"
"@ Enter THUMB Mode\n\t"
"adr r3, 2f+1 \n\t"
"bx r3 \n\t"
".THUMB \n"
"2: \n\t"
: "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno)
+ : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
: "r3");
#endif
#else
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 2f9e78f..afed0fa 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -897,15 +897,14 @@
// void Decoder::DecodeTypeVFP(Instr* instr)
-// Implements the following VFP instructions:
-// fmsr: Sn = Rt
-// fmrs: Rt = Sn
-// fsitod: Dd = Sm
-// ftosid: Sd = Dm
-// Dd = faddd(Dn, Dm)
-// Dd = fsubd(Dn, Dm)
-// Dd = fmuld(Dn, Dm)
-// Dd = fdivd(Dn, Dm)
+// vmov: Sn = Rt
+// vmov: Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// Dd = vadd(Dn, Dm)
+// Dd = vsub(Dn, Dm)
+// Dd = vmul(Dn, Dm)
+// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Decoder::DecodeTypeVFP(Instr* instr) {
@@ -997,8 +996,8 @@
// Decode Type 6 coprocessor instructions.
-// Dm = fmdrr(Rt, Rt2)
-// <Rt, Rt2> = fmrrd(Dm)
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 45cab55..4256e47 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -214,105 +214,310 @@
}
-void FastCodeGenerator::Move(Expression::Context context, Register source) {
+void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+
+ case Expression::kValue:
+ // Move value into place.
+ switch (location_) {
+ case kAccumulator:
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ __ push(reg);
+ break;
+ }
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Push an extra copy of the value in case it's needed.
+ __ push(reg);
+ // Fall through.
+
+ case Expression::kTest:
+ // We always call the runtime on ARM, so push the value as argument.
+ __ push(reg);
+ DoTest(context);
+ break;
+ }
+}
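
This Apply overload is the heart of the rewritten value plumbing: every expression result is either dropped, delivered in the accumulator or on the stack, or converted into a branch. A skeleton of the dispatch, with the assembler calls reduced to comments:

    enum Context { kEffect, kValue, kTest, kValueTest, kTestValue };
    enum Location { kAccumulator, kStack };

    void Apply(Context context, Location location /*, Register reg */) {
      switch (context) {
        case kEffect:
          break;  // value is discarded
        case kValue:
          // kAccumulator: move into the result register; kStack: push it.
          break;
        case kValueTest:
        case kTestValue:
          // Push an extra copy in case the value itself is needed, then...
        case kTest:
          // ...branch on its boolean value (DoTest).
          break;
      }
    }
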
+
+
+void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Nothing to do.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // On ARM we have to move the value into a register to do anything
+ // with it.
+ Move(result_register(), slot);
+ Apply(context, result_register());
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+      // Nothing to do.
        break;
case Expression::kValue:
- __ push(source);
- break;
case Expression::kTest:
- TestAndBranch(source, true_label_, false_label_);
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // On ARM we have to move the value into a register to do anything
+ // with it.
+ __ mov(result_register(), Operand(lit->handle()));
+ Apply(context, result_register());
break;
+ }
+}
+
+
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(1);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Duplicate the value on the stack in case it's needed.
+ __ ldr(ip, MemOperand(sp));
+ __ push(ip);
+ // Fall through.
+
+ case Expression::kTest:
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(sp));
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(count);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ if (count == 1) {
+ __ str(reg, MemOperand(sp));
+ __ push(reg);
+ } else { // count > 1
+ __ Drop(count - 2);
+ __ str(reg, MemOperand(sp, kPointerSize));
+ __ str(reg, MemOperand(sp));
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+
+ case Expression::kEffect:
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+ break;
+
+ case Expression::kValue: {
+ Label done;
+ __ bind(materialize_true);
+ __ mov(result_register(), Operand(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ mov(result_register(), Operand(Factory::false_value()));
+ __ bind(&done);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ break;
+ }
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(materialize_true);
+ __ mov(result_register(), Operand(Factory::true_value()));
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ __ jmp(true_label_);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(materialize_false);
+ __ mov(result_register(), Operand(Factory::false_value()));
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ __ jmp(false_label_);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DoTest(Expression::Context context) {
+ // The value to test is pushed on the stack, and duplicated on the stack
+ // if necessary (for value/test and test/value contexts).
+ ASSERT_NE(NULL, true_label_);
+ ASSERT_NE(NULL, false_label_);
+
+ // Call the runtime to find the boolean value of the source and then
+ // translate it into control flow to the pair of labels.
+ __ CallRuntime(Runtime::kToBool, 1);
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r0, ip);
+
+ // Complete based on the context.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+
+ case Expression::kTest:
+ __ b(eq, true_label_);
+ __ jmp(false_label_);
+ break;
+
case Expression::kValueTest: {
Label discard;
- __ push(source);
- TestAndBranch(source, true_label_, &discard);
+ switch (location_) {
+ case kAccumulator:
+ __ b(ne, &discard);
+ __ pop(result_register());
+ __ jmp(true_label_);
+ break;
+ case kStack:
+ __ b(eq, true_label_);
+ break;
+ }
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(false_label_);
break;
}
+
case Expression::kTestValue: {
Label discard;
- __ push(source);
- TestAndBranch(source, &discard, false_label_);
+ switch (location_) {
+ case kAccumulator:
+ __ b(eq, &discard);
+ __ pop(result_register());
+ __ jmp(false_label_);
+ break;
+ case kStack:
+ __ b(ne, false_label_);
+ break;
+ }
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(true_label_);
+ break;
}
}
}
-template <>
-MemOperand FastCodeGenerator::CreateSlotOperand<MemOperand>(
- Slot* source,
- Register scratch) {
- switch (source->type()) {
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
- return MemOperand(fp, SlotOffset(source));
+ return MemOperand(fp, SlotOffset(slot));
case Slot::CONTEXT: {
int context_chain_length =
- function_->scope()->ContextChainLength(source->var()->scope());
+ function_->scope()->ContextChainLength(slot->var()->scope());
__ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, source->index());
- break;
+ return CodeGenerator::ContextOperand(scratch, slot->index());
}
case Slot::LOOKUP:
- UNIMPLEMENTED();
- // Fall-through.
- default:
UNREACHABLE();
- return MemOperand(r0, 0); // Dead code to make the compiler happy.
}
+ UNREACHABLE();
+ return MemOperand(r0, 0);
}
-void FastCodeGenerator::Move(Register dst, Slot* source) {
- // Use dst as scratch.
- MemOperand location = CreateSlotOperand<MemOperand>(source, dst);
- __ ldr(dst, location);
-}
-
-
-
-void FastCodeGenerator::Move(Expression::Context context,
- Slot* source,
- Register scratch) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
- case Expression::kTestValue:
- Move(scratch, source);
- Move(context, scratch);
- break;
- }
-}
-
-
-void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- break;
- case Expression::kValue: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
- case Expression::kTestValue:
- __ mov(ip, Operand(expr->handle()));
- Move(context, ip);
- break;
- }
+void FastCodeGenerator::Move(Register destination, Slot* source) {
+ // Use destination as scratch.
+ MemOperand slot_operand = EmitSlotSearch(source, destination);
+ __ ldr(destination, slot_operand);
}
@@ -320,95 +525,18 @@
Register src,
Register scratch1,
Register scratch2) {
- switch (dst->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- __ str(src, MemOperand(fp, SlotOffset(dst)));
- break;
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(dst->var()->scope());
- __ LoadContext(scratch1, context_chain_length);
- int index = Context::SlotOffset(dst->index());
- __ mov(scratch2, Operand(index));
- __ str(src, MemOperand(scratch1, index));
- __ RecordWrite(scratch1, scratch2, src);
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ str(src, location);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
+ __ RecordWrite(scratch1, scratch2, src);
}
}
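
The rewritten Move applies the usual generational-GC rule: stack slots need no bookkeeping, but storing a heap pointer into a heap object must be recorded for the collector. A toy model using a set as the remembered set:

    #include <cstdint>
    #include <unordered_set>

    std::unordered_set<uintptr_t> remembered_slots;  // stand-in remembered set

    inline bool is_smi(uintptr_t v) { return (v & 1) == 0; }

    void StoreField(uintptr_t* slot, uintptr_t value, bool slot_is_in_heap) {
      *slot = value;
      if (slot_is_in_heap && !is_smi(value)) {
        // RecordWrite analogue: remember the slot for the next GC.
        remembered_slots.insert(reinterpret_cast<uintptr_t>(slot));
      }
    }
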
-
-void FastCodeGenerator::DropAndMove(Expression::Context context,
- Register source,
- int drop_count) {
- ASSERT(drop_count > 0);
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ add(sp, sp, Operand(drop_count * kPointerSize));
- break;
- case Expression::kValue:
- if (drop_count > 1) {
- __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
- }
- __ str(source, MemOperand(sp));
- break;
- case Expression::kTest:
- ASSERT(!source.is(sp));
- __ add(sp, sp, Operand(drop_count * kPointerSize));
- TestAndBranch(source, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (drop_count > 1) {
- __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
- }
- __ str(source, MemOperand(sp));
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ pop();
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (drop_count > 1) {
- __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
- }
- __ str(source, MemOperand(sp));
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ pop();
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::TestAndBranch(Register source,
- Label* true_label,
- Label* false_label) {
- ASSERT_NE(NULL, true_label);
- ASSERT_NE(NULL, false_label);
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(source);
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, true_label);
- __ jmp(false_label);
-}
-
-
void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
@@ -418,19 +546,21 @@
if (slot != NULL) {
switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
+ case Slot::PARAMETER:
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(ip, MemOperand(fp, SlotOffset(slot)));
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(ip);
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ VisitForValue(decl->fun(), kAccumulator);
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
}
break;
case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
@@ -445,13 +575,13 @@
__ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
// No write barrier since the_hole_value is in old space.
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(r0);
- __ str(r0, CodeGenerator::ContextOperand(cp, slot->index()));
+ VisitForValue(decl->fun(), kAccumulator);
+ __ str(result_register(),
+ CodeGenerator::ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
__ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
- __ RecordWrite(cp, r2, r0);
+ __ RecordWrite(cp, r2, result_register());
}
break;
@@ -472,7 +602,8 @@
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
} else if (decl->fun() != NULL) {
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
- Visit(decl->fun()); // Initial value for function decl.
+ // Push initial value for function declaration.
+ VisitForValue(decl->fun(), kStack);
} else {
__ mov(r0, Operand(Smi::FromInt(0))); // No initial value!
__ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
@@ -486,17 +617,13 @@
if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
- Visit(decl->fun());
- __ pop(r0);
+ VisitForValue(decl->fun(), kAccumulator);
} else {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
@@ -504,7 +631,7 @@
// Value in r0 is ignored (declarations are statements). Receiver
// and key on stack are discarded.
- __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Drop(2);
}
}
}
@@ -521,21 +648,6 @@
}
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- Expression* expr = stmt->expression();
- // Complete the statement based on the type of the subexpression.
- if (expr->AsLiteral() != NULL) {
- __ mov(r0, Operand(expr->AsLiteral()->handle()));
- } else {
- ASSERT_EQ(Expression::kValue, expr->context());
- Visit(expr);
- __ pop(r0);
- }
- EmitReturnSequence(stmt->statement_pos());
-}
-
-
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -550,30 +662,36 @@
__ mov(r0, Operand(boilerplate));
__ stm(db_w, sp, cp.bit() | r0.bit());
__ CallRuntime(Runtime::kNewClosure, 2);
- Move(expr->context(), r0);
+ Apply(context_, r0);
}
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- Expression* rewrite = expr->var()->rewrite();
+ EmitVariableLoad(expr->var(), context_);
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ Expression* rewrite = var->rewrite();
if (rewrite == NULL) {
- ASSERT(expr->var()->is_global());
+ ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
- __ mov(r2, Operand(expr->name()));
+ __ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context, r0);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
Comment cmnt(masm_, "Stack slot");
break;
}
@@ -584,21 +702,20 @@
case Slot::LOOKUP:
UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
- Move(expr->context(), slot, r0);
+ Apply(context, slot);
} else {
- // A variable has been rewritten into an explicit access to
- // an object property.
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Currently the only parameter expressions that can occur are
- // on the form "slot[literal]".
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
- // Check that the object is in a slot.
+ // Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object_var);
Slot* object_slot = object_var->slot();
@@ -607,7 +724,7 @@
// Load the object.
Move(r2, object_slot);
- // Check that the key is a smi.
+ // Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
ASSERT(key_literal->handle()->IsSmi());
@@ -618,12 +735,12 @@
// Push both as arguments to ic.
__ stm(db_w, sp, r2.bit() | r1.bit());
- // Do a KEYED property load.
+ // Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Drop key and object left on the stack by IC, and push the result.
- DropAndMove(expr->context(), r0, 2);
+ DropAndApply(2, context, r0);
}
}
@@ -651,43 +768,25 @@
__ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ bind(&done);
- Move(expr->context(), r0);
+ Apply(context_, r0);
}
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Label boilerplate_exists;
__ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // r2 = literal array (0).
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r0, FieldMemOperand(r2, literal_offset));
- // Check whether we need to materialize the object literal boilerplate.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, Operand(ip));
- __ b(ne, &boilerplate_exists);
- // Create boilerplate if it does not exist.
- // r1 = literal index (1).
__ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
- // r0 = constant properties (2).
__ mov(r0, Operand(expr->constant_properties()));
__ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ bind(&boilerplate_exists);
- // r0 contains boilerplate.
- // Clone boilerplate.
- __ push(r0);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- // If result_saved == true: The result is saved on top of the
- // stack and in r0.
- // If result_saved == false: The result not on the stack, just in r0.
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in r0.
bool result_saved = false;
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -703,112 +802,62 @@
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
-
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+ // Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ pop(r0);
+ VisitForValue(value, kAccumulator);
__ mov(r2, Operand(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// StoreIC leaves the receiver on the stack.
- __ ldr(r0, MemOperand(sp)); // Restore result into r0.
break;
}
// Fall through.
-
case ObjectLiteral::Property::PROTOTYPE:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
__ push(r0);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
+ VisitForValue(key, kStack);
+ VisitForValue(value, kStack);
__ CallRuntime(Runtime::kSetProperty, 3);
- __ ldr(r0, MemOperand(sp)); // Restore result into r0.
break;
-
- case ObjectLiteral::Property::GETTER: // Fall through.
+ case ObjectLiteral::Property::GETTER:
case ObjectLiteral::Property::SETTER:
+ // Duplicate receiver on stack.
+ __ ldr(r0, MemOperand(sp));
__ push(r0);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
+ VisitForValue(key, kStack);
__ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
__ push(r1);
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
+ VisitForValue(value, kStack);
__ CallRuntime(Runtime::kDefineAccessor, 4);
- __ ldr(r0, MemOperand(sp)); // Restore result into r0
break;
}
}
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ pop();
- break;
- case Expression::kValue:
- if (!result_saved) __ push(r0);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ pop();
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ pop();
- __ jmp(true_label_);
- break;
- }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, r0);
}
}
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Label make_clone;
-
- // Fetch the function's literals array.
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- // Check if the literal's boilerplate has been instantiated.
- int offset =
- FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
- __ ldr(r0, FieldMemOperand(r3, offset));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(&make_clone, ne);
-
- // Instantiate the boilerplate.
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->literals()));
+ __ mov(r1, Operand(expr->constant_elements()));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-
- __ bind(&make_clone);
- // Clone the boilerplate.
- __ push(r0);
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
bool result_saved = false; // Is the result saved to the stack?
@@ -829,166 +878,87 @@
__ push(r0);
result_saved = true;
}
- Visit(subexpr);
- ASSERT_EQ(Expression::kValue, subexpr->context());
+ VisitForValue(subexpr, kAccumulator);
// Store the subexpression value in the array's elements.
- __ pop(r0); // Subexpression value.
__ ldr(r1, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ str(r0, FieldMemOperand(r1, offset));
+ __ str(result_register(), FieldMemOperand(r1, offset));
// Update the write barrier for the array store with r0 as the scratch
// register.
__ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r0);
+ __ RecordWrite(r1, r2, result_register());
}
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ pop();
- break;
- case Expression::kValue:
- if (!result_saved) __ push(r0);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ pop();
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(r0);
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ pop();
- __ jmp(true_label_);
- break;
- }
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, r0);
}
}
-void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(r2, Operand(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
+ __ pop(r1);
+ GenericBinaryOpStub stub(op, NO_OVERWRITE);
+ __ CallStub(&stub);
+ Apply(context, r0);
+}
+
+
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in r0, variable name in
// r2, and the global object on the stack.
- __ pop(r0);
__ mov(r2, Operand(var->name()));
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context, r0);
- } else if (var->slot()) {
+ } else if (var->slot() != NULL) {
Slot* slot = var->slot();
- ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
switch (slot->type()) {
case Slot::LOCAL:
- case Slot::PARAMETER: {
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Perform assignment and discard value.
- __ pop(r0);
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
- break;
- case Expression::kValue:
- // Perform assignment and preserve value.
- __ ldr(r0, MemOperand(sp));
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
- break;
- case Expression::kTest:
- // Perform assignment and test (and discard) value.
- __ pop(r0);
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ pop();
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ pop();
- __ jmp(true_label_);
- break;
- }
- }
+ case Slot::PARAMETER:
+ __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
- }
case Slot::CONTEXT: {
- int chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- if (chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- __ ldr(r0, CodeGenerator::ContextOperand(cp, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
- for (int i = 1; i < chain_length; i++) {
- __ ldr(r0,
- CodeGenerator::ContextOperand(r0, Context::CLOSURE_INDEX));
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
- }
- } else { // Slot is in the current context. Generate optimized code.
- __ mov(r0, cp);
- }
- // The context may be an intermediate context, not a function context.
- __ ldr(r0, CodeGenerator::ContextOperand(r0, Context::FCONTEXT_INDEX));
- __ pop(r1);
- __ str(r1, CodeGenerator::ContextOperand(r0, slot->index()));
+ MemOperand target = EmitSlotSearch(slot, r1);
+ __ str(result_register(), target);
// RecordWrite may destroy all its register arguments.
- if (expr->context() == Expression::kValue) {
- __ push(r1);
- } else if (expr->context() != Expression::kEffect) {
- __ mov(r3, r1);
- }
+ __ mov(r3, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- // Update the write barrier for the array store with r0 as the scratch
- // register. Skip the write barrier if the value written (r1) is a smi.
- // The smi test is part of RecordWrite on other platforms, not on arm.
- Label exit;
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &exit);
-
__ mov(r2, Operand(offset));
- __ RecordWrite(r0, r2, r1);
- __ bind(&exit);
- if (expr->context() != Expression::kEffect &&
- expr->context() != Expression::kValue) {
- Move(expr->context(), r3);
- }
+ __ RecordWrite(r1, r2, r3);
break;
}
@@ -996,6 +966,11 @@
UNREACHABLE();
break;
}
+ Apply(context, result_register());
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
}
}
@@ -1010,12 +985,15 @@
// change to slow case to avoid the quadratic behavior of repeatedly
// adding fast properties.
if (expr->starts_initialization_block()) {
- __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
+ __ push(result_register());
+ __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is now under value.
__ push(ip);
__ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
}
- __ pop(r0);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -1029,7 +1007,7 @@
__ pop(r0);
}
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context_, r0);
}
@@ -1040,20 +1018,23 @@
// change to slow case to avoid the quadratic behavior of repeatedly
// adding fast properties.
if (expr->starts_initialization_block()) {
- // Reciever is under the key and value.
+ __ push(result_register());
+ // Receiver is now under the key and value.
__ ldr(ip, MemOperand(sp, 2 * kPointerSize));
__ push(ip);
__ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
}
- __ pop(r0);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ push(r0); // Result of assignment, saved even if not needed.
- // Reciever is under the key and value.
+ // Receiver is under the key and value.
__ ldr(ip, MemOperand(sp, 2 * kPointerSize));
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
@@ -1061,58 +1042,48 @@
}
// Receiver and key are still on stack.
- __ add(sp, sp, Operand(2 * kPointerSize));
- Move(expr->context(), r0);
+ DropAndApply(2, context_, r0);
}
void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- uint32_t dummy;
-
- // Record the source position for the property load.
- SetSourcePosition(expr->position());
// Evaluate receiver.
- Visit(expr->obj());
+ VisitForValue(expr->obj(), kStack);
- if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
- !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
- // Do a NAMED property load.
- // The IC expects the property name in r2 and the receiver on the stack.
- __ mov(r2, Operand(key->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ if (key->IsPropertyName()) {
+ EmitNamedPropertyLoad(expr);
+ // Drop receiver left on the stack by IC.
+ DropAndApply(1, context_, r0);
} else {
- // Do a KEYED property load.
- Visit(expr->key());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ VisitForValue(expr->key(), kStack);
+ EmitKeyedPropertyLoad(expr);
// Drop key and receiver left on the stack by IC.
- __ pop();
+ DropAndApply(2, context_, r0);
}
- DropAndMove(expr->context(), r0);
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> ignored,
+ RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ VisitForValue(args->at(i), kStack);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
NOT_IN_LOOP);
- __ Call(ic, reloc_info);
+ __ Call(ic, mode);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context_, r0);
}
@@ -1121,16 +1092,16 @@
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
+ VisitForValue(args->at(i), kStack);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context_, r0);
}
@@ -1148,7 +1119,7 @@
// Push global object as receiver for the call IC lookup.
__ ldr(r0, CodeGenerator::GlobalObject());
__ stm(db_w, sp, r1.bit() | r0.bit());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1161,13 +1132,13 @@
// Call to a named property, use call IC.
__ mov(r0, Operand(key->handle()));
__ push(r0);
- Visit(prop->obj());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ VisitForValue(prop->obj(), kStack);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
- Visit(prop->obj());
- Visit(prop->key());
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1175,6 +1146,7 @@
// Load receiver object into r1.
if (prop->is_synthetic()) {
__ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
} else {
__ ldr(r1, MemOperand(sp, kPointerSize));
}
@@ -1193,7 +1165,7 @@
loop_depth() == 0) {
lit->set_try_fast_codegen(true);
}
- Visit(fun);
+ VisitForValue(fun, kStack);
// Load global receiver object.
__ ldr(r1, CodeGenerator::GlobalObject());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
@@ -1210,8 +1182,7 @@
// expression in new calls must be evaluated before the
// arguments.
// Push function on the stack.
- Visit(expr->expression());
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
+ VisitForValue(expr->expression(), kStack);
// Push global object (receiver).
__ ldr(r0, CodeGenerator::GlobalObject());
@@ -1220,10 +1191,7 @@
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- // If location is value, it is already on the stack,
- // so nothing to do here.
+ VisitForValue(args->at(i), kStack);
}
// Call the construct call builtin that handles allocation and
@@ -1232,14 +1200,14 @@
// Load function, arg_count into r1 and r0.
__ mov(r0, Operand(arg_count));
- // Function is in esp[arg_count + 1].
+ // Function is in sp[arg_count + 1].
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
// Replace function on TOS with result in r0, or pop it.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context_, r0);
}
@@ -1258,8 +1226,7 @@
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ VisitForValue(args->at(i), kStack);
}
if (expr->is_jsruntime()) {
@@ -1270,11 +1237,11 @@
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context_, r0);
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
- Move(expr->context(), r0);
+ Apply(context_, r0);
}
}
@@ -1283,23 +1250,35 @@
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- Visit(expr->expression());
- ASSERT_EQ(Expression::kEffect, expr->expression()->context());
- switch (expr->context()) {
+ VisitForEffect(expr->expression());
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
case Expression::kEffect:
break;
case Expression::kValue:
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
break;
case Expression::kTestValue:
// Value is false so it's needed.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
- case Expression::kTest: // Fall through.
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ // Fall through.
+ case Expression::kTest:
case Expression::kValueTest:
__ jmp(false_label_);
break;
@@ -1309,74 +1288,39 @@
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- ASSERT_EQ(Expression::kTest, expr->expression()->context());
-
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- switch (expr->context()) {
+ Label materialize_true, materialize_false, done;
+ // Initially assume a pure test context. Notice that the labels are
+ // swapped.
+ Label* if_true = false_label_;
+ Label* if_false = true_label_;
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_false;
- false_label_ = &push_true;
- Visit(expr->expression());
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
- Visit(expr->expression());
- __ bind(&done);
+ if_true = &done;
+ if_false = &done;
break;
-
+ case Expression::kValue:
+ if_true = &materialize_false;
+ if_false = &materialize_true;
+ break;
case Expression::kTest:
- true_label_ = saved_false;
- false_label_ = saved_true;
- Visit(expr->expression());
break;
-
case Expression::kValueTest:
- true_label_ = saved_false;
- false_label_ = &push_true;
- Visit(expr->expression());
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(saved_true);
+ if_false = &materialize_true;
break;
-
case Expression::kTestValue:
- true_label_ = &push_false;
- false_label_ = saved_true;
- Visit(expr->expression());
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ jmp(saved_false);
+ if_true = &materialize_false;
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
+ VisitForControl(expr->expression(), if_true, if_false);
+ Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL &&
!proxy->var()->is_this() &&
@@ -1399,11 +1343,11 @@
__ push(r0);
} else {
// This expression cannot throw a reference error at the top level.
- Visit(expr->expression());
+ VisitForValue(expr->expression(), kStack);
}
__ CallRuntime(Runtime::kTypeof, 1);
- Move(expr->context(), r0);
+ Apply(context_, r0);
break;
}
@@ -1415,73 +1359,124 @@
void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- Visit(proxy);
+ // Expression can only be a property, a global or a (parameter or local)
+  // slot. Variables rewritten to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+  // For a property, inspect the key to detect a named property: a literal
+  // key that is a symbol but not an array index.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ Location saved_location = location_;
+ location_ = kStack;
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ location_ = saved_location;
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && context_ != Expression::kEffect) {
+ __ mov(ip, Operand(Smi::FromInt(0)));
+ __ push(ip);
+ }
+ VisitForValue(prop->obj(), kStack);
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForValue(prop->key(), kStack);
+ EmitKeyedPropertyLoad(prop);
+ }
+ __ push(r0);
+ }
+
+ // Convert to number.
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kValue: // Fall through
- case Expression::kTest: // Fall through
- case Expression::kTestValue: // Fall through
- case Expression::kValueTest:
- // Duplicate the result on the stack.
- __ push(r0);
- break;
- case Expression::kEffect:
- // Do not save result.
- break;
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+        // Save the result on the stack. If we have a named or keyed
+        // property we store the result under the receiver (and, for a
+        // keyed property, the key) currently on top of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ break;
+ }
}
- // Call runtime for +1/-1.
- __ push(r0);
- __ mov(ip, Operand(Smi::FromInt(1)));
- __ push(ip);
- if (expr->op() == Token::INC) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- __ CallRuntime(Runtime::kNumberSub, 2);
- }
- // Call Store IC.
- __ mov(r2, Operand(proxy->AsVariable()->name()));
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Restore up stack after store IC.
- __ add(sp, sp, Operand(kPointerSize));
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through
- case Expression::kValue:
- // Do nothing. Result in either on the stack for value context
- // or discarded for effect context.
+ // Call stub for +1/-1.
+ __ mov(r1, Operand(expr->op() == Token::INC
+ ? Smi::FromInt(1)
+ : Smi::FromInt(-1)));
+ GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
+ __ CallStub(&stub);
+
+ // Store the value returned in r0.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+        // For all contexts except kEffect, the result is now on top of
+        // the stack.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ context_);
+ }
break;
- case Expression::kTest:
- __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ add(sp, sp, Operand(kPointerSize));
- __ b(false_label_);
+ case NAMED_PROPERTY: {
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(1, context_, r0);
+ }
break;
}
- case Expression::kTestValue: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ add(sp, sp, Operand(kPointerSize));
- __ b(true_label_);
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(2, context_, r0);
+ }
break;
}
}
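
The prefix/postfix split above reduces to a small invariant: the postfix form must produce the value observed before the +1/-1 stub runs, which is why the TO_NUMBER result is stashed on the stack first. A minimal host-language model (illustrative only; a plain double slot stands in for the variable or property location):

    #include <cassert>

    double CountOperation(double* slot, int delta, bool is_postfix) {
      double old_value = *slot;   // value after the TO_NUMBER builtin
      *slot = old_value + delta;  // GenericBinaryOpStub(Token::ADD) with +/-1
      return is_postfix ? old_value : *slot;  // postfix yields the old value
    }

    int main() {
      double x = 5;
      assert(CountOperation(&x, 1, true) == 5 && x == 6);    // x++
      assert(CountOperation(&x, -1, false) == 5 && x == 5);  // --x
      return 0;
    }
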
@@ -1492,9 +1487,7 @@
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
- ASSERT_EQ(Expression::kEffect, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
- Visit(expr->left());
+ VisitForEffect(expr->left());
Visit(expr->right());
break;
@@ -1513,21 +1506,12 @@
case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
- case Token::SAR: {
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
-
- Visit(expr->left());
- Visit(expr->right());
- __ pop(r0);
- __ pop(r1);
- GenericBinaryOpStub stub(expr->op(),
- NO_OVERWRITE);
- __ CallStub(&stub);
- Move(expr->context(), r0);
-
+ case Token::SAR:
+ VisitForValue(expr->left(), kStack);
+ VisitForValue(expr->right(), kAccumulator);
+ EmitBinaryOp(expr->op(), context_);
break;
- }
+
default:
UNREACHABLE();
}
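
The binary-operation rewrite above standardizes an operand-passing convention: the left operand is materialized on the stack, the right operand stays in the accumulator (r0), and EmitBinaryOp pops the left into r1 before calling the stub. A toy stack-machine restatement (assumptions: integer values, addition standing in for the stub):

    #include <cassert>
    #include <vector>

    struct ToyCodegen {
      std::vector<int> stack;  // operand stack (sp)
      int accumulator = 0;     // models r0, the result register

      void VisitForValueStack(int v) { stack.push_back(v); }     // kStack
      void VisitForValueAccumulator(int v) { accumulator = v; }  // kAccumulator

      void EmitBinaryOp() {
        int left = stack.back();  // __ pop(r1)
        stack.pop_back();
        accumulator = left + accumulator;  // stub leaves the result in r0
      }
    };

    int main() {
      ToyCodegen cg;
      cg.VisitForValueStack(2);        // VisitForValue(expr->left(), kStack)
      cg.VisitForValueAccumulator(3);  // VisitForValue(expr->right(), kAccumulator)
      cg.EmitBinaryOp();
      assert(cg.accumulator == 5);
      return 0;
    }
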
@@ -1536,65 +1520,58 @@
void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- // Convert current context to test context: Pre-test code.
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- switch (expr->context()) {
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_true;
- false_label_ = &push_false;
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
+ if_true = &done;
+ if_false = &done;
break;
-
+ case Expression::kValue:
+ if_true = &materialize_true;
+ if_false = &materialize_false;
+ break;
case Expression::kTest:
break;
-
case Expression::kValueTest:
- true_label_ = &push_true;
+ if_true = &materialize_true;
break;
-
case Expression::kTestValue:
- false_label_ = &push_false;
+ if_false = &materialize_false;
break;
}
- // Convert current context to test context: End pre-test code.
+ VisitForValue(expr->left(), kStack);
switch (expr->op()) {
- case Token::IN: {
+ case Token::IN:
+ VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_JS);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
- __ b(eq, true_label_);
- __ jmp(false_label_);
+ __ b(eq, if_true);
+ __ jmp(if_false);
break;
- }
case Token::INSTANCEOF: {
+ VisitForValue(expr->right(), kStack);
InstanceofStub stub;
__ CallStub(&stub);
__ tst(r0, r0);
- __ b(eq, true_label_); // The stub returns 0 for true.
- __ jmp(false_label_);
+ __ b(eq, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
break;
}
default: {
+ VisitForValue(expr->right(), kAccumulator);
Condition cc = eq;
bool strict = false;
switch (expr->op()) {
@@ -1603,29 +1580,26 @@
// Fall through
case Token::EQ:
cc = eq;
- __ pop(r0);
__ pop(r1);
break;
case Token::LT:
cc = lt;
- __ pop(r0);
__ pop(r1);
break;
case Token::GT:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
cc = lt;
- __ pop(r1);
+ __ mov(r1, result_register());
__ pop(r0);
break;
case Token::LTE:
- // Reverse left and right sizes to obtain ECMA-262 conversion order.
+ // Reverse left and right sides to obtain ECMA-262 conversion order.
cc = ge;
- __ pop(r1);
+ __ mov(r1, result_register());
__ pop(r0);
break;
case Token::GTE:
cc = ge;
- __ pop(r0);
__ pop(r1);
break;
case Token::IN:
@@ -1641,68 +1615,75 @@
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow_case);
__ cmp(r1, r0);
- __ b(cc, true_label_);
- __ jmp(false_label_);
+ __ b(cc, if_true);
+ __ jmp(if_false);
__ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
- __ tst(r0, r0);
- __ b(cc, true_label_);
- __ jmp(false_label_);
+ __ cmp(r0, Operand(0));
+ __ b(cc, if_true);
+ __ jmp(if_false);
}
}
- // Convert current context to test context: Post-test code.
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kValue:
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(&done);
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ bind(&done);
- break;
-
- case Expression::kEffect:
- __ bind(&done);
- break;
-
- case Expression::kTest:
- break;
-
- case Expression::kValueTest:
- __ bind(&push_true);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ push(ip);
- __ jmp(saved_true);
- break;
-
- case Expression::kTestValue:
- __ bind(&push_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ push(ip);
- __ jmp(saved_false);
- break;
- }
- true_label_ = saved_true;
- false_label_ = saved_false;
- // Convert current context to test context: End post-test code.
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ Apply(context_, if_true, if_false);
}
+
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), r0);
+ Apply(context_, r0);
+}
+
+
+Register FastCodeGenerator::result_register() { return r0; }
+
+
+Register FastCodeGenerator::context_register() { return cp; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ str(value, MemOperand(fp, frame_offset));
+}
+
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FastCodeGenerator::EnterFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Store result register while executing finally block.
+ __ push(result_register());
+  // Cook the return address on the stack: push lr as a smi-encoded
+  // delta from the start of the code object.
+ __ sub(r1, lr, Operand(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ add(r1, r1, Operand(r1)); // Convert to smi.
+ __ push(r1);
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(r1));
+ // Restore result register from stack.
+ __ pop(r1);
+ // Uncook return address and return.
+ __ pop(result_register());
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ mov(r1, Operand(r1, ASR, 1)); // Un-smi-tag value.
+ __ add(pc, r1, Operand(masm_->CodeObject()));
}
#undef __
-
} } // namespace v8::internal
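
EnterFinallyBlock and ExitFinallyBlock above protect the return address across a finally block by storing it as a smi-encoded offset from the code object, so the GC never sees a raw code pointer on the stack. A self-contained sketch of the encoding (the addresses are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t code_start = 0x10000;  // hypothetical Code object address
      uintptr_t lr = 0x10428;          // hypothetical return address in it

      uintptr_t delta = lr - code_start;  // __ sub(r1, lr, CodeObject())
      uintptr_t cooked = delta + delta;   // __ add(r1, r1, r1): smi-tag (tag 0)
      assert((cooked & 1) == 0);          // scans as a smi, not a pointer

      uintptr_t uncooked = cooked >> 1;     // the ASR #1 in ExitFinallyBlock
      assert(code_start + uncooked == lr);  // __ add(pc, r1, CodeObject())
      return 0;
    }
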
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index b0fa13a..0cb7f12 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -28,7 +28,11 @@
#include "v8.h"
#include "frames-inl.h"
+#ifdef V8_ARM_VARIANT_THUMB
+#include "arm/assembler-thumb2-inl.h"
+#else
#include "arm/assembler-arm-inl.h"
+#endif
namespace v8 {
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index c56f414..a1f2613 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -276,7 +276,7 @@
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
@@ -371,13 +371,11 @@
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
-void CallIC::Generate(MacroAssembler* masm,
- int argc,
- const ExternalReference& f) {
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@@ -394,7 +392,7 @@
// Call the entry.
__ mov(r0, Operand(2));
- __ mov(r1, Operand(f));
+ __ mov(r1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
CEntryStub stub(1);
__ CallStub(&stub);
@@ -620,6 +618,15 @@
}
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ GenerateGeneric(masm);
+}
+
+
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// TODO(476): port specialized code.
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index aa6570c..18cadac 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -162,6 +162,21 @@
}
+void MacroAssembler::Drop(int count, Condition cond) {
+ if (count > 0) {
+ add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
+ }
+}
+
+
+void MacroAssembler::Call(Label* target) {
+ bl(target);
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+ mov(dst, Operand(value));
+}
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
@@ -628,6 +643,15 @@
}
+void MacroAssembler::PopTryHandler() {
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+ pop(r1);
+ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ str(r1, MemOperand(ip));
+}
+
+
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
@@ -994,9 +1018,9 @@
Register outLowReg) {
// ARMv7 VFP3 instructions to implement integer to double conversion.
mov(r7, Operand(inReg, ASR, kSmiTagSize));
- fmsr(s15, r7);
- fsitod(d7, s15);
- fmrrd(outLowReg, outHighReg, d7);
+ vmov(s15, r7);
+ vcvt(d7, s15);
+ vmov(outLowReg, outHighReg, d7);
}
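
The new PopTryHandler above leans on kNextOffset being zero: the first pop pulls the saved next-handler pointer, the rest of the handler frame is dropped, and the pointer is stored back as the new chain head. A linked-list model of that unlink (illustrative; the field names are not the real ones):

    #include <cassert>

    struct StackHandler {
      StackHandler* next;  // at offset 0, matching the ASSERT_EQ above
      // ... saved state, pc, fp would follow in the real frame ...
    };

    void PopTryHandler(StackHandler** handler_address, StackHandler* top) {
      *handler_address = top->next;  // pop(r1) + str(r1, [handler_address])
    }

    int main() {
      StackHandler outer = { nullptr };
      StackHandler inner = { &outer };
      StackHandler* chain = &inner;
      PopTryHandler(&chain, &inner);  // unlink the innermost handler
      assert(chain == &outer);
      return 0;
    }
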
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 0974329..8f2064a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -64,6 +64,13 @@
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = al);
+
+ void Call(Label* target);
+ void Move(Register dst, Handle<Object> value);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
// Load an object from the root table.
@@ -148,6 +155,9 @@
// On exit, r0 contains TOS (code slot).
void PushTryHandler(CodeLocation try_location, HandlerType type);
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ // Must preserve the result register.
+ void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 24b6a9c..ed06eb2 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -59,15 +59,19 @@
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
+ * - direct_call (if 1, direct call from JavaScript code, if 0 call
+ * through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, start at start of string, if 0, don't)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
* - backup of registers r4..r11
- * - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
+ * - start index (character index of start)
* --- frame pointer ----
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
@@ -85,11 +89,13 @@
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
+ * int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
- * byte* stack_area_base)
+ * byte* stack_area_base,
+ * bool direct_call)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc).
*/
@@ -459,8 +465,6 @@
bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
@@ -469,11 +473,6 @@
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
@@ -487,11 +486,6 @@
return false;
case 'S':
// Match non-space characters.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmp(current_character(), Operand(' '));
@@ -504,33 +498,18 @@
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
__ sub(r0, current_character(), Operand('0'));
__ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(hi, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
__ sub(r0, current_character(), Operand('0'));
__ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(ls, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
__ eor(r0, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(r0, r0, Operand(0x0b));
@@ -546,13 +525,61 @@
}
return true;
}
- case '*':
- // Match any character.
- if (check_offset) {
- CheckPosition(cp_offset, on_no_match);
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ eor(r0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(r0, r0, Operand(0x0b));
+ __ cmp(r0, Operand(0x0c - 0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ Label done;
+ __ b(ls, &done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ __ cmp(r0, Operand(1));
+ BranchOrBacktrack(hi, on_no_match);
+ __ bind(&done);
}
return true;
- // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Operand('z'));
+ BranchOrBacktrack(hi, on_no_match);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r0, Operand(map));
+ __ ldrb(r0, MemOperand(r0, current_character()));
+ __ tst(r0, Operand(r0));
+ BranchOrBacktrack(eq, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(current_character(), Operand('z'));
+ __ b(hi, &done);
+ }
+ ExternalReference map = ExternalReference::re_word_character_map();
+ __ mov(r0, Operand(map));
+ __ ldrb(r0, MemOperand(r0, current_character()));
+ __ tst(r0, Operand(r0));
+ BranchOrBacktrack(ne, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
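
The new \w and \W cases above index a 128-entry byte table (re_word_character_map()) by character code and test the loaded byte for zero. The table itself is defined elsewhere in the regexp engine; the sketch below assumes it follows the usual \w definition (ASCII letters, digits, and underscore):

    #include <cassert>
    #include <cstdint>

    static uint8_t word_map[128];

    void BuildWordCharacterMap() {
      for (int c = 0; c < 128; c++) {
        bool is_word = (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
                       (c >= 'a' && c <= 'z') || c == '_';
        word_map[c] = is_word ? 0xFF : 0x00;  // ldrb + tst: nonzero means \w
      }
    }

    int main() {
      BuildWordCharacterMap();
      assert(word_map['a'] && word_map['_'] && !word_map[' ']);
      return 0;
    }
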
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index f70bc05..4459859 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -80,8 +80,6 @@
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
@@ -127,6 +125,7 @@
static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
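
With the new start-index argument and the kDirectCall stack slot above, the native entry point described in the frame comment now takes eight arguments, matching the updated CALL_GENERATED_REGEXP_CODE macros in simulator-arm.h below. Restated as a typedef (the type names are stand-ins for the V8-internal ones):

    #include <cstdint>

    struct String;         // opaque stand-in for v8::internal::String
    typedef uint8_t byte;
    typedef byte* Address;

    typedef int (*RegExpEntry)(String* input_string,
                               int start_index,
                               Address start,
                               Address end,
                               int* capture_output_array,
                               bool at_start,
                               byte* stack_area_base,
                               bool direct_call);

    int main() {
      RegExpEntry entry = nullptr;  // would point at generated code in V8
      (void)entry;
      return 0;
    }
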
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 9dc417b..c4b1e00 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -355,6 +355,10 @@
} else {
PrintF("Not at debugger stop.");
}
+ } else if ((strcmp(cmd, "t") == 0) || (strcmp(cmd, "trace") == 0)) {
+ ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+ PrintF("Trace of executed instructions is %s\n",
+ ::v8::internal::FLAG_trace_sim ? "on" : "off");
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
PrintF(" continue execution (alias 'c')\n");
@@ -378,7 +382,9 @@
PrintF(" delete the breakpoint\n");
PrintF("unstop\n");
PrintF(" ignore the stop instruction at the current location");
- PrintF(" from now on\n");
+ PrintF(" from now on\n");
+ PrintF("trace (alias 't')\n");
+ PrintF(" toogle the tracing of all executed statements");
} else {
PrintF("Unknown command: %s\n", cmd);
}
@@ -890,8 +896,13 @@
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
+ if (isnan(val1) || isnan(val2)) {
+ n_flag_FPSCR_ = false;
+ z_flag_FPSCR_ = false;
+ c_flag_FPSCR_ = true;
+ v_flag_FPSCR_ = true;
// All non-NaN cases.
- if (val1 == val2) {
+ } else if (val1 == val2) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = true;
c_flag_FPSCR_ = true;
@@ -1893,14 +1904,14 @@
// void Simulator::DecodeTypeVFP(Instr* instr)
// The Following ARMv7 VFPv instructions are currently supported.
-// fmsr :Sn = Rt
-// fmrs :Rt = Sn
-// fsitod: Dd = Sm
-// ftosid: Sd = Dm
-// Dd = faddd(Dn, Dm)
-// Dd = fsubd(Dn, Dm)
-// Dd = fmuld(Dn, Dm)
-// Dd = fdivd(Dn, Dm)
+// vmov: Sn = Rt
+// vmov: Rt = Sn
+// vcvt: Dd = Sm
+// vcvt: Sd = Dm
+// Dd = vadd(Dn, Dm)
+// Dd = vsub(Dn, Dm)
+// Dd = vmul(Dn, Dm)
+// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Simulator::DecodeTypeVFP(Instr* instr) {
@@ -2020,8 +2031,8 @@
// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
// Decode Type 6 coprocessor instructions.
-// Dm = fmdrr(Rt, Rt2)
-// <Rt, Rt2> = fmrrd(Dm)
+// Dm = vmov(Rt, Rt2)
+// <Rt, Rt2> = vmov(Dm)
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 3a4bb31..3ce5b7a 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -62,9 +62,9 @@
// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
assembler::arm::Simulator::current()->Call( \
- FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+ FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index efccaf4..687fb1e 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -446,7 +446,7 @@
}
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -454,7 +454,8 @@
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss) {
+ Label* miss,
+ Failure** failure) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
@@ -476,6 +477,8 @@
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
+
+ return true;
}
@@ -634,50 +637,65 @@
break;
case STRING_CHECK:
- // Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
- __ b(hs, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a two-byte string or a symbol.
+ __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(hs, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fast);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &fast);
+ __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- r2);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
- r1, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(eq, &fast);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ r2);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+ r1, name, &miss);
+ }
break;
}
@@ -774,8 +792,26 @@
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- __ cmp(r1, Operand(Handle<JSFunction>(function)));
- __ b(ne, &miss);
+ if (Heap::InNewSpace(function)) {
+ // We can't embed a pointer to a function in new space so we have
+ // to verify that the shared function info is unchanged. This has
+ // the nice side effect that multiple closures based on the same
+ // function can all use this call IC. Before we load through the
+ // function, we have to verify that it still is a function.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ b(ne, &miss);
+
+ // Check the shared function info. Make sure it hasn't changed.
+ __ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ cmp(r2, r3);
+ __ b(ne, &miss);
+ } else {
+ __ cmp(r1, Operand(Handle<JSFunction>(function)));
+ __ b(ne, &miss);
+ }
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -1003,10 +1039,10 @@
}
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
JSObject* holder,
- AccessorInfo* callback,
- String* name) {
+ AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@@ -1015,7 +1051,11 @@
Label miss;
__ ldr(r0, MemOperand(sp, 0));
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1168,7 +1208,11 @@
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
- GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(receiver, holder, r0, r2, r3, r1,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
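
The widened cell check above exists because compiled code cannot embed raw pointers into new space; for a new-space function the IC instead compares the tenured SharedFunctionInfo, which as the comment notes also lets every closure over the same shared info hit the same call IC. A simplified model (the field and predicate names are illustrative, not the real heap API):

    #include <cassert>

    struct SharedFunctionInfo {};

    struct JSFunction {
      SharedFunctionInfo* shared;
      bool in_new_space;  // stands in for Heap::InNewSpace(function)
    };

    bool CallICMatches(const JSFunction* callee, const JSFunction* expected) {
      if (expected->in_new_space) {
        return callee->shared == expected->shared;  // compare shared info
      }
      return callee == expected;  // old space: direct compare is safe
    }

    int main() {
      SharedFunctionInfo info;
      JSFunction original = { &info, true };
      JSFunction other_closure = { &info, true };
      assert(CallICMatches(&other_closure, &original));  // same shared info
      return 0;
    }
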
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 132c8ae..a33ebd4 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -143,12 +143,25 @@
if (count > 0) {
Comment cmnt(masm(), "[ Allocate space for locals");
Adjust(count);
- // Initialize stack slots with 'undefined' value.
+ // Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(ip);
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ if (count < kLocalVarBound) {
+      // For fewer locals the unrolled sequence of pushes is more compact.
+ for (int i = 0; i < count; i++) {
+ __ push(ip);
+ }
+ } else {
+ // For more locals a loop in generated code is more compact.
+ Label alloc_locals_loop;
+ __ mov(r1, Operand(count));
+ __ bind(&alloc_locals_loop);
+ __ push(ip);
+ __ sub(r1, r1, Operand(1), SetCC);
+ __ b(ne, &alloc_locals_loop);
+ }
+ } else {
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
}
// Check the stack for overflow or a break request.
// Put the lr setup instruction in the delay slot. The kInstrSize is added
@@ -387,6 +400,13 @@
}
+void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ Adjust(count);
+ __ stm(db_w, sp, src_regs);
+}
+
+
#undef __
} } // namespace v8::internal
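
The unrolled-versus-loop choice in AllocateStackSlots above is a code-size trade-off: each unrolled push costs one instruction, while the loop costs a fixed four (mov, push, sub, branch), so the loop wins once the count reaches kLocalVarBound, declared in the virtual-frame-arm.h hunk below. In instruction counts (a back-of-the-envelope model, ignoring the shared LoadRoot setup):

    #include <cassert>

    const int kLocalVarBound = 5;  // as declared in virtual-frame-arm.h

    int StackSlotCodeSize(int count) {  // in instructions
      if (count < kLocalVarBound) return count;  // count x push
      return 1 + 3;  // mov r1,#count, then the {push; sub; b} loop body
    }

    int main() {
      assert(StackSlotCodeSize(4) == 4);   // unrolled still no larger
      assert(StackSlotCodeSize(5) == 4);   // loop from here on
      assert(StackSlotCodeSize(50) == 4);  // size stays constant
      return 0;
    }
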
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index d523000..b2f0eea 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -180,6 +180,9 @@
// shared return site. Emits code for spills.
void PrepareForReturn();
+  // Number of local variables at or above which a loop is used to
+  // allocate them.
+ static const int kLocalVarBound = 5;
+
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@@ -346,6 +349,11 @@
// corresponding push instruction.
void EmitPush(Register reg);
+  // Push multiple registers on the stack and the virtual frame.
+  // Registers are selected by setting bits in src_regs and
+  // are pushed in decreasing order: r15 .. r0.
+ void EmitPushMultiple(int count, int src_regs);
+
// Push an element on the virtual frame.
void Push(Register reg);
void Push(Handle<Object> value);
diff --git a/src/array.js b/src/array.js
index 20d884e..c3ab179 100644
--- a/src/array.js
+++ b/src/array.js
@@ -70,19 +70,22 @@
// Optimized for sparse arrays if separator is ''.
function SparseJoin(array, len, convert) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var builder = new StringBuilder();
var last_key = -1;
var keys_length = keys.length;
+
+ var elements = new $Array(keys_length);
+ var elements_length = 0;
+
for (var i = 0; i < keys_length; i++) {
var key = keys[i];
if (key != last_key) {
var e = array[key];
- if (typeof(e) !== 'string') e = convert(e);
- builder.add(e);
+ if (!IS_STRING(e)) e = convert(e);
+ elements[elements_length++] = e;
last_key = key;
}
}
- return builder.generate();
+ return %StringBuilderConcat(elements, elements_length, '');
}
@@ -107,7 +110,7 @@
// Attempt to convert the elements.
try {
- if (UseSparseVariant(array, length, is_array) && separator === '') {
+ if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
return SparseJoin(array, length, convert);
}
@@ -115,39 +118,37 @@
if (length == 1) {
var e = array[0];
if (!IS_UNDEFINED(e) || (0 in array)) {
- if (typeof(e) === 'string') return e;
+ if (IS_STRING(e)) return e;
return convert(e);
}
}
- var builder = new StringBuilder();
+ // Construct an array for the elements.
+ var elements;
+ var elements_length = 0;
// We pull the empty separator check outside the loop for speed!
if (separator.length == 0) {
+ elements = new $Array(length);
for (var i = 0; i < length; i++) {
var e = array[i];
if (!IS_UNDEFINED(e) || (i in array)) {
- if (typeof(e) !== 'string') e = convert(e);
- if (e.length > 0) {
- var elements = builder.elements;
- elements[elements.length] = e;
- }
+ if (!IS_STRING(e)) e = convert(e);
+ elements[elements_length++] = e;
}
}
} else {
+ elements = new $Array(length << 1);
for (var i = 0; i < length; i++) {
var e = array[i];
- if (i != 0) builder.add(separator);
+ if (i != 0) elements[elements_length++] = separator;
if (!IS_UNDEFINED(e) || (i in array)) {
- if (typeof(e) !== 'string') e = convert(e);
- if (e.length > 0) {
- var elements = builder.elements;
- elements[elements.length] = e;
- }
+ if (!IS_STRING(e)) e = convert(e);
+ elements[elements_length++] = e;
}
}
}
- return builder.generate();
+ return %StringBuilderConcat(elements, elements_length, '');
} finally {
// Make sure to pop the visited array no matter what happens.
if (is_array) visited_arrays.pop();
@@ -156,16 +157,15 @@
function ConvertToString(e) {
- if (typeof(e) === 'string') return e;
if (e == null) return '';
else return ToString(e);
}
function ConvertToLocaleString(e) {
- if (typeof(e) === 'string') return e;
- if (e == null) return '';
- else {
+ if (e == null) {
+ return '';
+ } else {
// e_obj's toLocaleString might be overwritten, check if it is a function.
// Call ToString if toLocaleString is not a function.
// See issue 877615.
@@ -359,16 +359,20 @@
function ArrayJoin(separator) {
- if (IS_UNDEFINED(separator)) separator = ',';
- else separator = ToString(separator);
- return Join(this, ToUint32(this.length), separator, ConvertToString);
+ if (IS_UNDEFINED(separator)) {
+ separator = ',';
+ } else if (!IS_STRING(separator)) {
+ separator = ToString(separator);
+ }
+ var length = TO_UINT32(this.length);
+ return Join(this, length, separator, ConvertToString);
}
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
- var n = ToUint32(this.length);
+ var n = TO_UINT32(this.length);
if (n == 0) {
this.length = n;
return;
@@ -384,7 +388,7 @@
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
- var n = ToUint32(this.length);
+ var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
@@ -452,7 +456,7 @@
function ArrayReverse() {
- var j = ToUint32(this.length) - 1;
+ var j = TO_UINT32(this.length) - 1;
if (UseSparseVariant(this, j, IS_ARRAY(this))) {
SparseReverse(this, j+1);
@@ -483,7 +487,7 @@
function ArrayShift() {
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
if (len === 0) {
this.length = 0;
@@ -504,7 +508,7 @@
function ArrayUnshift(arg1) { // length == 1
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
if (IS_ARRAY(this))
@@ -523,7 +527,7 @@
function ArraySlice(start, end) {
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@@ -568,7 +572,7 @@
// compatibility.
if (num_arguments == 0) return;
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
if (start_i < 0) {
@@ -850,7 +854,7 @@
return first_undefined;
}
- length = ToUint32(this.length);
+ length = TO_UINT32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
@@ -915,7 +919,7 @@
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
@@ -933,7 +937,7 @@
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
@@ -950,25 +954,23 @@
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (!f.call(receiver, current, i, this)) return false;
}
}
-
return true;
}
-
function ArrayMap(f, receiver) {
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
var result = new $Array(length);
for (var i = 0; i < length; i++) {
var current = this[i];
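
The array.js rewrite above replaces the StringBuilder object with a plain elements array, separators interleaved when non-empty, followed by a single %StringBuilderConcat call in the runtime. The same two-phase strategy in a host-language sketch:

    #include <string>
    #include <vector>

    std::string Join(const std::vector<std::string>& array,
                     const std::string& separator) {
      // Phase 1: collect parts, interleaving the separator
      // (hence the length << 1 sizing in the JavaScript above).
      std::vector<std::string> elements;
      elements.reserve(array.size() * 2);
      for (size_t i = 0; i < array.size(); i++) {
        if (i != 0 && !separator.empty()) elements.push_back(separator);
        elements.push_back(array[i]);
      }
      // Phase 2: one sized concatenation, as %StringBuilderConcat does.
      size_t total = 0;
      for (const std::string& e : elements) total += e.size();
      std::string result;
      result.reserve(total);
      for (const std::string& e : elements) result += e;
      return result;
    }
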
diff --git a/src/assembler.cc b/src/assembler.cc
index 9c9ddcd..fcdb14a 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -44,6 +44,7 @@
#include "regexp-stack.h"
#include "ast.h"
#include "regexp-macro-assembler.h"
+#include "platform.h"
// Include native regexp-macro-assembler.
#ifdef V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_IA32
@@ -563,13 +564,18 @@
}
-ExternalReference ExternalReference::builtin_passed_function() {
- return ExternalReference(&Builtins::builtin_passed_function);
+ExternalReference ExternalReference::random_positive_smi_function() {
+ return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
}
-ExternalReference ExternalReference::random_positive_smi_function() {
- return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
+ExternalReference ExternalReference::keyed_lookup_cache_keys() {
+ return ExternalReference(KeyedLookupCache::keys_address());
+}
+
+
+ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
+ return ExternalReference(KeyedLookupCache::field_offsets_address());
}
@@ -664,6 +670,23 @@
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
+ExternalReference ExternalReference::re_word_character_map() {
+ return ExternalReference(
+ NativeRegExpMacroAssembler::word_character_map_address());
+}
+
+ExternalReference ExternalReference::address_of_static_offsets_vector() {
+ return ExternalReference(OffsetsVector::static_offsets_vector_address());
+}
+
+ExternalReference ExternalReference::address_of_regexp_stack_memory_address() {
+ return ExternalReference(RegExpStack::memory_address());
+}
+
+ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
+ return ExternalReference(RegExpStack::memory_size_address());
+}
+
#endif
@@ -688,13 +711,13 @@
static double mod_two_doubles(double x, double y) {
- return fmod(x, y);
+ return modulo(x, y);
}
-static int native_compare_doubles(double x, double y) {
- if (x == y) return 0;
- return x < y ? 1 : -1;
+static int native_compare_doubles(double y, double x) {
+ if (x == y) return EQUAL;
+ return x < y ? LESS : GREATER;
}
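
native_compare_doubles now returns the comparison-token constants and takes its operands as (y, x), reporting the ordering of x relative to y; note the 1/-1 swap relative to the deleted version. A self-contained restatement (assuming the conventional values LESS = -1, EQUAL = 0, GREATER = 1):

    #include <cassert>

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

    int CompareDoubles(double y, double x) {  // note the (y, x) order
      if (x == y) return EQUAL;
      return x < y ? LESS : GREATER;
    }

    int main() {
      assert(CompareDoubles(2.0, 1.0) == LESS);  // x = 1 is less than y = 2
      assert(CompareDoubles(1.0, 1.0) == EQUAL);
      assert(CompareDoubles(1.0, 2.0) == GREATER);
      return 0;
    }
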
diff --git a/src/assembler.h b/src/assembler.h
index aecd4cd..ec47d57 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -398,9 +398,12 @@
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference perform_gc_function();
- static ExternalReference builtin_passed_function();
static ExternalReference random_positive_smi_function();
+ // Static data in the keyed lookup cache.
+ static ExternalReference keyed_lookup_cache_keys();
+ static ExternalReference keyed_lookup_cache_field_offsets();
+
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location();
@@ -416,6 +419,11 @@
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
+ // Static variables for RegExp.
+ static ExternalReference address_of_static_offsets_vector();
+ static ExternalReference address_of_regexp_stack_memory_address();
+ static ExternalReference address_of_regexp_stack_memory_size();
+
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
static ExternalReference heap_always_allocate_scope_depth();
@@ -454,6 +462,10 @@
// Function NativeRegExpMacroAssembler::GrowStack()
static ExternalReference re_grow_stack();
+
+  // byte NativeRegExpMacroAssembler::word_character_map
+ static ExternalReference re_word_character_map();
+
#endif
// This lets you register a function that rewrites all external references.
diff --git a/src/ast.cc b/src/ast.cc
index 90b5ed6..4edcf6d 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -433,7 +433,7 @@
} else {
stream()->Add("%i ", that->max());
}
- stream()->Add(that->is_greedy() ? "g " : "n ");
+ stream()->Add(that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
that->body()->Accept(this, data);
stream()->Add(")");
return NULL;
diff --git a/src/ast.h b/src/ast.h
index c27d558..e753a52 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -139,6 +139,7 @@
virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
+ virtual CompareOperation* AsCompareOperation() { return NULL; }
};
@@ -179,27 +180,26 @@
kTestValue
};
- Expression() : context_(kUninitialized) {}
-
virtual Expression* AsExpression() { return this; }
virtual bool IsValidJSON() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
+ // Symbols that cannot be parsed as array indices are considered property
+  // names. We do not treat symbols that can be array indices as property
+ // names because [] for string objects is handled only by keyed ICs.
+ virtual bool IsPropertyName() { return false; }
+
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
virtual void MarkAsStatement() { /* do nothing */ }
// Static type information for this expression.
- SmiAnalysis* type() { return &type_; }
-
- Context context() { return context_; }
- void set_context(Context context) { context_ = context; }
+ StaticType* type() { return &type_; }
private:
- SmiAnalysis type_;
- Context context_;
+ StaticType type_;
};
@@ -641,21 +641,20 @@
class TryCatchStatement: public TryStatement {
public:
TryCatchStatement(Block* try_block,
- Expression* catch_var,
+ VariableProxy* catch_var,
Block* catch_block)
: TryStatement(try_block),
catch_var_(catch_var),
catch_block_(catch_block) {
- ASSERT(catch_var->AsVariableProxy() != NULL);
}
virtual void Accept(AstVisitor* v);
- Expression* catch_var() const { return catch_var_; }
+ VariableProxy* catch_var() const { return catch_var_; }
Block* catch_block() const { return catch_block_; }
private:
- Expression* catch_var_;
+ VariableProxy* catch_var_;
Block* catch_block_;
};
@@ -706,6 +705,14 @@
virtual bool IsValidJSON() { return true; }
+ virtual bool IsPropertyName() {
+ if (handle_->IsSymbol()) {
+ uint32_t ignored;
+ return !String::cast(*handle_)->AsArrayIndex(&ignored);
+ }
+ return false;
+ }
+
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
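
IsPropertyName above encodes the rule that a symbol is a property name only when it does not parse as an array index. A standalone sketch of that predicate; the helper name is invented, and the bounds follow the ECMAScript definition of an array index (a canonical decimal in [0, 2^32 - 2]):

    #include <cstdint>
    #include <string>

    // True when s is a canonical decimal integer in [0, 2^32 - 2],
    // i.e. an array index rather than a plain property name.
    static bool LooksLikeArrayIndex(const std::string& s) {
      if (s.empty() || s.size() > 10) return false;   // "4294967294" is 10 digits
      if (s.size() > 1 && s[0] == '0') return false;  // "01" is not canonical
      uint64_t value = 0;
      for (char c : s) {
        if (c < '0' || c > '9') return false;
        value = value * 10 + static_cast<uint64_t>(c - '0');
      }
      return value <= 4294967294u;                    // 2^32 - 2
    }
    // "length" -> false (property name); "123" -> true (array index).
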
@@ -826,24 +833,24 @@
// for minimizing the work when constructing it at runtime.
class ArrayLiteral: public MaterializedLiteral {
public:
- ArrayLiteral(Handle<FixedArray> literals,
+ ArrayLiteral(Handle<FixedArray> constant_elements,
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
int depth)
: MaterializedLiteral(literal_index, is_simple, depth),
- literals_(literals),
+ constant_elements_(constant_elements),
values_(values) {}
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
virtual bool IsValidJSON();
- Handle<FixedArray> literals() const { return literals_; }
+ Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
private:
- Handle<FixedArray> literals_;
+ Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
};
@@ -1171,6 +1178,9 @@
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
Token::Value op() const { return op_; }
+ Token::Value binary_op() {
+ return op_ == Token::INC ? Token::ADD : Token::SUB;
+ }
Expression* expression() const { return expression_; }
virtual void MarkAsStatement() { is_prefix_ = true; }
@@ -1185,7 +1195,7 @@
class CompareOperation: public Expression {
public:
CompareOperation(Token::Value op, Expression* left, Expression* right)
- : op_(op), left_(left), right_(right) {
+ : op_(op), left_(left), right_(right), is_for_loop_condition_(false) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1195,10 +1205,18 @@
Expression* left() const { return left_; }
Expression* right() const { return right_; }
+  // Accessors for the flag: is this compare operation a for-loop condition?
+ bool is_for_loop_condition() const { return is_for_loop_condition_; }
+ void set_is_for_loop_condition() { is_for_loop_condition_ = true; }
+
+ // Type testing & conversion
+ virtual CompareOperation* AsCompareOperation() { return this; }
+
private:
Token::Value op_;
Expression* left_;
Expression* right_;
+ bool is_for_loop_condition_;
};
@@ -1241,6 +1259,8 @@
Expression* target() const { return target_; }
Expression* value() const { return value_; }
int position() { return pos_; }
+  // This check relies on the definition order of the tokens in token.h.
+ bool is_compound() const { return op() > Token::ASSIGN; }
// An initialization block is a series of statements of the form
// x.y.z.a = ...; x.y.z.b = ...; etc. The parser marks the beginning and
@@ -1515,6 +1535,7 @@
standard_set_type_ = special_set_type;
}
bool is_standard() { return standard_set_type_ != 0; }
+ void Canonicalize();
private:
ZoneList<CharacterRange>* ranges_;
// If non-zero, the value represents a standard set (e.g., all whitespace
@@ -1608,12 +1629,13 @@
class RegExpQuantifier: public RegExpTree {
public:
- RegExpQuantifier(int min, int max, bool is_greedy, RegExpTree* body)
- : min_(min),
+ enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
+ RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
+ : body_(body),
+ min_(min),
max_(max),
- is_greedy_(is_greedy),
- body_(body),
- min_match_(min * body->min_match()) {
+ min_match_(min * body->min_match()),
+ type_(type) {
if (max > 0 && body->max_match() > kInfinity / max) {
max_match_ = kInfinity;
} else {
@@ -1637,15 +1659,17 @@
virtual int max_match() { return max_match_; }
int min() { return min_; }
int max() { return max_; }
- bool is_greedy() { return is_greedy_; }
+ bool is_possessive() { return type_ == POSSESSIVE; }
+ bool is_non_greedy() { return type_ == NON_GREEDY; }
+ bool is_greedy() { return type_ == GREEDY; }
RegExpTree* body() { return body_; }
private:
+ RegExpTree* body_;
int min_;
int max_;
- bool is_greedy_;
- RegExpTree* body_;
int min_match_;
int max_match_;
+ Type type_;
};
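
The quantifier now carries a three-valued Type instead of a lone greedy bit, making room for possessive quantifiers, which never give back characters once matched. A sketch of building the three one-or-more variants through the constructor above (Plus is a hypothetical helper; kInfinity is assumed to be RegExpTree's unbounded-max sentinel):

    static RegExpTree* Plus(RegExpTree* body, RegExpQuantifier::Type type) {
      // min = 1, max = kInfinity: one or more repetitions of body.
      return new RegExpQuantifier(1, RegExpTree::kInfinity, type, body);
    }

    // Plus(atom, RegExpQuantifier::GREEDY)      behaves like /a+/
    // Plus(atom, RegExpQuantifier::NON_GREEDY)  behaves like /a+?/
    // Plus(atom, RegExpQuantifier::POSSESSIVE)  behaves like /a++/
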
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index deda96f..9eacf57 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -95,6 +95,8 @@
static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
// This is for delete, not delete[].
static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
+// This is for delete[], not delete.
+static List<char*>* delete_these_arrays_on_tear_down = NULL;
NativesExternalStringResource::NativesExternalStringResource(const char* source)
@@ -150,17 +152,41 @@
}
+char* Bootstrapper::AllocateAutoDeletedArray(int bytes) {
+ char* memory = new char[bytes];
+ if (memory != NULL) {
+ if (delete_these_arrays_on_tear_down == NULL) {
+ delete_these_arrays_on_tear_down = new List<char*>(2);
+ }
+ delete_these_arrays_on_tear_down->Add(memory);
+ }
+ return memory;
+}
+
+
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down != NULL) {
int len = delete_these_non_arrays_on_tear_down->length();
ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down->at(i);
+ delete_these_non_arrays_on_tear_down->at(i) = NULL;
}
delete delete_these_non_arrays_on_tear_down;
delete_these_non_arrays_on_tear_down = NULL;
}
+ if (delete_these_arrays_on_tear_down != NULL) {
+ int len = delete_these_arrays_on_tear_down->length();
+ ASSERT(len < 1000); // Don't use this mechanism for unbounded allocations.
+ for (int i = 0; i < len; i++) {
+ delete[] delete_these_arrays_on_tear_down->at(i);
+ delete_these_arrays_on_tear_down->at(i) = NULL;
+ }
+ delete delete_these_arrays_on_tear_down;
+ delete_these_arrays_on_tear_down = NULL;
+ }
+
natives_cache.Initialize(false); // Yes, symmetrical
extensions_cache.Initialize(false);
}
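
AllocateAutoDeletedArray centralizes ownership: the caller receives a raw char array whose delete[] happens in TearDown above, so the caller must never free it. A hedged usage sketch (the table and its contents are invented for illustration):

    // Build a table that lives until V8 shuts down; the bootstrapper,
    // not the caller, performs the delete[].
    static char* CreateByteTable() {
      char* table = Bootstrapper::AllocateAutoDeletedArray(256);
      if (table == NULL) return NULL;   // allocation failed
      for (int i = 0; i < 256; i++) {
        table[i] = static_cast<char>(i);
      }
      return table;                     // do not delete[] this
    }
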
@@ -966,6 +992,7 @@
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
+ INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
configure_instance_fun);
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 07d2747..7cd3a2b 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -74,6 +74,10 @@
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
static void FreeThreadResources();
+
+ // This will allocate a char array that is deleted when V8 is shut down.
+ // It should only be used for strictly finite allocations.
+ static char* AllocateAutoDeletedArray(int bytes);
};
diff --git a/src/builtins.cc b/src/builtins.cc
index b66635c..9db2230 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -36,8 +36,78 @@
namespace v8 {
namespace internal {
+namespace {
+
+// Arguments object passed to C++ builtins.
+template <BuiltinExtraArguments extra_args>
+class BuiltinArguments : public Arguments {
+ public:
+ BuiltinArguments(int length, Object** arguments)
+ : Arguments(length, arguments) { }
+
+ Object*& operator[] (int index) {
+ ASSERT(index < length());
+ return Arguments::operator[](index);
+ }
+
+ template <class S> Handle<S> at(int index) {
+ ASSERT(index < length());
+ return Arguments::at<S>(index);
+ }
+
+ Handle<Object> receiver() {
+ return Arguments::at<Object>(0);
+ }
+
+ Handle<JSFunction> called_function() {
+ STATIC_ASSERT(extra_args == NEEDS_CALLED_FUNCTION);
+ return Arguments::at<JSFunction>(Arguments::length() - 1);
+ }
+
+ // Gets the total number of arguments including the receiver (but
+ // excluding extra arguments).
+ int length() const {
+ STATIC_ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ return Arguments::length();
+ }
+
+#ifdef DEBUG
+ void Verify() {
+ // Check we have at least the receiver.
+ ASSERT(Arguments::length() >= 1);
+ }
+#endif
+};
+
+
+// Specialize BuiltinArguments for the called function extra argument.
+
+template <>
+int BuiltinArguments<NEEDS_CALLED_FUNCTION>::length() const {
+ return Arguments::length() - 1;
+}
+
+#ifdef DEBUG
+template <>
+void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
+ // Check we have at least the receiver and the called function.
+ ASSERT(Arguments::length() >= 2);
+ // Make sure cast to JSFunction succeeds.
+ called_function();
+}
+#endif
+
+
+#define DEF_ARG_TYPE(name, spec) \
+ typedef BuiltinArguments<spec> name##ArgumentsType;
+BUILTIN_LIST_C(DEF_ARG_TYPE)
+#undef DEF_ARG_TYPE
+
+} // namespace
+
+
// ----------------------------------------------------------------------------
-// Support macros for defining builtins in C.
+// Support macro for defining builtins in C++.
// ----------------------------------------------------------------------------
//
// A builtin function is defined by writing:
@@ -45,30 +115,26 @@
// BUILTIN(name) {
// ...
// }
-// BUILTIN_END
//
-// In the body of the builtin function, the variable 'receiver' is visible.
-// The arguments can be accessed through the Arguments object args.
-//
-// args[0]: Receiver (also available as 'receiver')
-// args[1]: First argument
-// ...
-// args[n]: Last argument
-// args.length(): Number of arguments including the receiver.
-// ----------------------------------------------------------------------------
+// In the body of the builtin function the arguments can be accessed
+// through the BuiltinArguments object args.
+#ifdef DEBUG
-// TODO(428): We should consider passing whether or not the
-// builtin was invoked as a constructor as part of the
-// arguments. Maybe we also want to pass the called function?
-#define BUILTIN(name) \
- static Object* Builtin_##name(Arguments args) { \
- Handle<Object> receiver = args.at<Object>(0);
+#define BUILTIN(name) \
+ static Object* Builtin_Impl_##name(name##ArgumentsType args); \
+ static Object* Builtin_##name(name##ArgumentsType args) { \
+ args.Verify(); \
+ return Builtin_Impl_##name(args); \
+ } \
+ static Object* Builtin_Impl_##name(name##ArgumentsType args)
+#else // For release mode.
-#define BUILTIN_END \
- return Heap::undefined_value(); \
-}
+#define BUILTIN(name) \
+ static Object* Builtin_##name(name##ArgumentsType args)
+
+#endif
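
Putting the macros together, this is what the debug build generates for one BUILTIN_LIST_C entry, V(ArrayPush, NO_EXTRA_ARGUMENTS); the expansion is reconstructed from the definitions in this hunk:

    // From DEF_ARG_TYPE:
    typedef BuiltinArguments<NO_EXTRA_ARGUMENTS> ArrayPushArgumentsType;

    // From BUILTIN(ArrayPush) under #ifdef DEBUG:
    static Object* Builtin_Impl_ArrayPush(ArrayPushArgumentsType args);
    static Object* Builtin_ArrayPush(ArrayPushArgumentsType args) {
      args.Verify();                       // checks the receiver is present
      return Builtin_Impl_ArrayPush(args);
    }
    static Object* Builtin_Impl_ArrayPush(ArrayPushArgumentsType args)
    // ...followed by the builtin body, which supplies the braces.
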
static inline bool CalledAsConstructor() {
@@ -126,13 +192,13 @@
BUILTIN(Illegal) {
UNREACHABLE();
+ return Heap::undefined_value(); // Make compiler happy.
}
-BUILTIN_END
BUILTIN(EmptyFunction) {
+ return Heap::undefined_value();
}
-BUILTIN_END
BUILTIN(ArrayCodeGeneric) {
@@ -140,7 +206,7 @@
JSArray* array;
if (CalledAsConstructor()) {
- array = JSArray::cast(*receiver);
+ array = JSArray::cast(*args.receiver());
} else {
// Allocate the JS Array
JSFunction* constructor =
@@ -194,11 +260,10 @@
return array;
}
-BUILTIN_END
BUILTIN(ArrayPush) {
- JSArray* array = JSArray::cast(*receiver);
+ JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
// Make sure we have space for the elements.
@@ -233,11 +298,10 @@
array->set_length(Smi::FromInt(new_length), SKIP_WRITE_BARRIER);
return array->length();
}
-BUILTIN_END
BUILTIN(ArrayPop) {
- JSArray* array = JSArray::cast(*receiver);
+ JSArray* array = JSArray::cast(*args.receiver());
ASSERT(array->HasFastElements());
Object* undefined = Heap::undefined_value();
@@ -265,7 +329,6 @@
return top;
}
-BUILTIN_END
// -----------------------------------------------------------------------------
@@ -320,20 +383,20 @@
}
-BUILTIN(HandleApiCall) {
- HandleScope scope;
- bool is_construct = CalledAsConstructor();
+template <bool is_construct>
+static Object* HandleApiCallHelper(
+ BuiltinArguments<NEEDS_CALLED_FUNCTION> args) {
+ ASSERT(is_construct == CalledAsConstructor());
- // TODO(428): Remove use of static variable, handle API callbacks directly.
- Handle<JSFunction> function =
- Handle<JSFunction>(JSFunction::cast(Builtins::builtin_passed_function));
+ HandleScope scope;
+ Handle<JSFunction> function = args.called_function();
if (is_construct) {
Handle<FunctionTemplateInfo> desc =
Handle<FunctionTemplateInfo>(
FunctionTemplateInfo::cast(function->shared()->function_data()));
bool pending_exception = false;
- Factory::ConfigureInstance(desc, Handle<JSObject>::cast(receiver),
+ Factory::ConfigureInstance(desc, Handle<JSObject>::cast(args.receiver()),
&pending_exception);
ASSERT(Top::has_pending_exception() == pending_exception);
if (pending_exception) return Failure::Exception();
@@ -359,15 +422,13 @@
Object* data_obj = call_data->data();
Object* result;
- v8::Local<v8::Object> self =
- v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
Handle<Object> data_handle(data_obj);
v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
ASSERT(raw_holder->IsJSObject());
v8::Local<v8::Function> callee = v8::Utils::ToLocal(function);
Handle<JSObject> holder_handle(JSObject::cast(raw_holder));
v8::Local<v8::Object> holder = v8::Utils::ToLocal(holder_handle);
- LOG(ApiObjectAccess("call", JSObject::cast(*receiver)));
+ LOG(ApiObjectAccess("call", JSObject::cast(*args.receiver())));
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
holder,
@@ -395,16 +456,26 @@
if (!is_construct || result->IsJSObject()) return result;
}
- return *receiver;
+ return *args.receiver();
}
-BUILTIN_END
+
+
+BUILTIN(HandleApiCall) {
+ return HandleApiCallHelper<false>(args);
+}
+
+
+BUILTIN(HandleApiCallConstruct) {
+ return HandleApiCallHelper<true>(args);
+}
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
-static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
- Arguments args) {
+static Object* HandleApiCallAsFunctionOrConstructor(
+ bool is_construct_call,
+ BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
// Non-functions are never called as constructors. Even if this is an object
// called as a constructor the delegate call is not a construct call.
ASSERT(!CalledAsConstructor());
@@ -412,7 +483,7 @@
Handle<Object> receiver = args.at<Object>(0);
// Get the object called.
- JSObject* obj = JSObject::cast(*receiver);
+ JSObject* obj = JSObject::cast(*args.receiver());
// Get the invocation callback from the function descriptor that was
// used to create the called object.
@@ -432,12 +503,12 @@
Object* result;
{ HandleScope scope;
v8::Local<v8::Object> self =
- v8::Utils::ToLocal(Handle<JSObject>::cast(receiver));
+ v8::Utils::ToLocal(Handle<JSObject>::cast(args.receiver()));
Handle<Object> data_handle(data_obj);
v8::Local<v8::Value> data = v8::Utils::ToLocal(data_handle);
Handle<JSFunction> callee_handle(constructor);
v8::Local<v8::Function> callee = v8::Utils::ToLocal(callee_handle);
- LOG(ApiObjectAccess("call non-function", JSObject::cast(*receiver)));
+ LOG(ApiObjectAccess("call non-function", JSObject::cast(*args.receiver())));
v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
self,
@@ -471,7 +542,6 @@
BUILTIN(HandleApiCallAsFunction) {
return HandleApiCallAsFunctionOrConstructor(false, args);
}
-BUILTIN_END
// Handle calls to non-function objects created through the API. This delegate
@@ -479,14 +549,6 @@
BUILTIN(HandleApiCallAsConstructor) {
return HandleApiCallAsFunctionOrConstructor(true, args);
}
-BUILTIN_END
-
-
-// TODO(1238487): This is a nasty hack. We need to improve the way we
-// call builtins considerable to get rid of this and the hairy macros
-// in builtins.cc.
-Object* Builtins::builtin_passed_function;
-
static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
@@ -544,6 +606,11 @@
}
+static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateString(masm);
+}
+
+
static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
}
@@ -703,7 +770,7 @@
Object* Builtins::builtins_[builtin_count] = { NULL, };
const char* Builtins::names_[builtin_count] = { NULL, };
-#define DEF_ENUM_C(name) FUNCTION_ADDR(Builtin_##name),
+#define DEF_ENUM_C(name, ignore) FUNCTION_ADDR(Builtin_##name),
Address Builtins::c_functions_[cfunction_count] = {
BUILTIN_LIST_C(DEF_ENUM_C)
};
@@ -734,14 +801,16 @@
const char* s_name; // name is only used for generating log information.
int name;
Code::Flags flags;
+ BuiltinExtraArguments extra_args;
};
-#define DEF_FUNCTION_PTR_C(name) \
- { FUNCTION_ADDR(Generate_Adaptor), \
- FUNCTION_ADDR(Builtin_##name), \
- #name, \
- c_##name, \
- Code::ComputeFlags(Code::BUILTIN) \
+#define DEF_FUNCTION_PTR_C(name, extra_args) \
+ { FUNCTION_ADDR(Generate_Adaptor), \
+ FUNCTION_ADDR(Builtin_##name), \
+ #name, \
+ c_##name, \
+ Code::ComputeFlags(Code::BUILTIN), \
+ extra_args \
},
#define DEF_FUNCTION_PTR_A(name, kind, state) \
@@ -749,7 +818,8 @@
NULL, \
#name, \
name, \
- Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state) \
+ Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state), \
+ NO_EXTRA_ARGUMENTS \
},
// Define array of pointers to generators and C builtin functions.
@@ -758,7 +828,8 @@
BUILTIN_LIST_A(DEF_FUNCTION_PTR_A)
BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A)
// Terminator:
- { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0) }
+ { NULL, NULL, NULL, builtin_count, static_cast<Code::Flags>(0),
+ NO_EXTRA_ARGUMENTS }
};
#undef DEF_FUNCTION_PTR_C
@@ -774,12 +845,12 @@
if (create_heap_objects) {
MacroAssembler masm(buffer, sizeof buffer);
// Generate the code/adaptor.
- typedef void (*Generator)(MacroAssembler*, int);
+ typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
// We pass all arguments to the generator, but it may not use all of
// them. This works because the first arguments are on top of the
// stack.
- g(&masm, functions[i].name);
+ g(&masm, functions[i].name, functions[i].extra_args);
// Move the code into the object heap.
CodeDesc desc;
masm.GetCode(&desc);
diff --git a/src/builtins.h b/src/builtins.h
index bc32c49..418948f 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -31,20 +31,28 @@
namespace v8 {
namespace internal {
-// Define list of builtins implemented in C.
-#define BUILTIN_LIST_C(V) \
- V(Illegal) \
- \
- V(EmptyFunction) \
- \
- V(ArrayCodeGeneric) \
- \
- V(ArrayPush) \
- V(ArrayPop) \
- \
- V(HandleApiCall) \
- V(HandleApiCallAsFunction) \
- V(HandleApiCallAsConstructor)
+// Specifies extra arguments required by a C++ builtin.
+enum BuiltinExtraArguments {
+ NO_EXTRA_ARGUMENTS = 0,
+ NEEDS_CALLED_FUNCTION = 1
+};
+
+
+// Define list of builtins implemented in C++.
+#define BUILTIN_LIST_C(V) \
+ V(Illegal, NO_EXTRA_ARGUMENTS) \
+ \
+ V(EmptyFunction, NO_EXTRA_ARGUMENTS) \
+ \
+ V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \
+ \
+ V(ArrayPush, NO_EXTRA_ARGUMENTS) \
+ V(ArrayPop, NO_EXTRA_ARGUMENTS) \
+ \
+ V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
+ V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
+ V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
+ V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)
// Define list of builtins implemented in assembly.
@@ -52,6 +60,7 @@
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
\
@@ -74,6 +83,7 @@
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
+ V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
@@ -147,7 +157,8 @@
V(STRING_ADD_LEFT, 1) \
V(STRING_ADD_RIGHT, 1) \
V(APPLY_PREPARE, 1) \
- V(APPLY_OVERFLOW, 1)
+ V(APPLY_OVERFLOW, 1) \
+ V(STRING_CHAR_AT, 1)
class ObjectVisitor;
@@ -167,7 +178,7 @@
static const char* Lookup(byte* pc);
enum Name {
-#define DEF_ENUM_C(name) name,
+#define DEF_ENUM_C(name, ignore) name,
#define DEF_ENUM_A(name, kind, state) name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
@@ -178,7 +189,7 @@
};
enum CFunctionId {
-#define DEF_ENUM_C(name) c_##name,
+#define DEF_ENUM_C(name, ignore) c_##name,
BUILTIN_LIST_C(DEF_ENUM_C)
#undef DEF_ENUM_C
cfunction_count
@@ -210,8 +221,6 @@
static Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
- static Object* builtin_passed_function;
-
private:
// The external C++ functions called from the code.
static Address c_functions_[cfunction_count];
@@ -224,9 +233,12 @@
static const char* javascript_names_[id_count];
static int javascript_argc_[id_count];
- static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
+ static void Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args);
static void Generate_JSConstructCall(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
+ static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index dbc39ff..09581aa 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -35,82 +35,117 @@
namespace v8 {
namespace internal {
-Handle<Code> CodeStub::GetCode() {
- bool custom_cache = has_custom_cache();
-
- int index = 0;
- uint32_t key = 0;
- if (custom_cache) {
- Code* cached;
- if (GetCustomCache(&cached)) {
- return Handle<Code>(cached);
- } else {
- index = NumberDictionary::kNotFound;
- }
- } else {
- key = GetKey();
- index = Heap::code_stubs()->FindEntry(key);
- if (index != NumberDictionary::kNotFound)
- return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+bool CodeStub::FindCodeInCache(Code** code_out) {
+ if (has_custom_cache()) return GetCustomCache(code_out);
+ int index = Heap::code_stubs()->FindEntry(GetKey());
+ if (index != NumberDictionary::kNotFound) {
+ *code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
+ return true;
}
+ return false;
+}
- Code* result;
- {
+
+void CodeStub::GenerateCode(MacroAssembler* masm) {
+ // Update the static counter each time a new code stub is generated.
+ Counters::code_stubs.Increment();
+  // Nested stubs are not allowed for leaves.
+ masm->set_allow_stub_calls(AllowsStubCalls());
+ // Generate the code for the stub.
+ masm->set_generating_stub(true);
+ Generate(masm);
+}
+
+
+void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
+ code->set_major_key(MajorKey());
+
+ // Add unresolved entries in the code to the fixup list.
+ Bootstrapper::AddFixup(code, masm);
+
+ LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+ Counters::total_stubs_code_size.Increment(code->instruction_size());
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code_stubs) {
+#ifdef DEBUG
+ Print();
+#endif
+ code->Disassemble(GetName());
+ PrintF("\n");
+ }
+#endif
+}
+
+
+Handle<Code> CodeStub::GetCode() {
+ Code* code;
+ if (!FindCodeInCache(&code)) {
v8::HandleScope scope;
- // Update the static counter each time a new code stub is generated.
- Counters::code_stubs.Increment();
-
// Generate the new code.
MacroAssembler masm(NULL, 256);
-
- // Nested stubs are not allowed for leafs.
- masm.set_allow_stub_calls(AllowsStubCalls());
-
- // Generate the code for the stub.
- masm.set_generating_stub(true);
- Generate(&masm);
+ GenerateCode(&masm);
// Create the code object.
CodeDesc desc;
masm.GetCode(&desc);
- // Copy the generated code into a heap object, and store the major key.
+ // Copy the generated code into a heap object.
Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
- Handle<Code> code = Factory::NewCode(desc, NULL, flags, masm.CodeObject());
- code->set_major_key(MajorKey());
+ Handle<Code> new_object =
+ Factory::NewCode(desc, NULL, flags, masm.CodeObject());
+ RecordCodeGeneration(*new_object, &masm);
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(*code, &masm);
-
- LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
- Counters::total_stubs_code_size.Increment(code->instruction_size());
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code_stubs) {
-#ifdef DEBUG
- Print();
-#endif
- code->Disassemble(GetName());
- PrintF("\n");
- }
-#endif
-
- if (custom_cache) {
- SetCustomCache(*code);
+ if (has_custom_cache()) {
+ SetCustomCache(*new_object);
} else {
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
Factory::DictionaryAtNumberPut(
Handle<NumberDictionary>(Heap::code_stubs()),
- key,
- code);
+ GetKey(),
+ new_object);
Heap::public_set_code_stubs(*dict);
}
- result = *code;
+ code = *new_object;
}
- return Handle<Code>(result);
+ return Handle<Code>(code);
+}
+
+
+Object* CodeStub::TryGetCode() {
+ Code* code;
+ if (!FindCodeInCache(&code)) {
+ // Generate the new code.
+ MacroAssembler masm(NULL, 256);
+ GenerateCode(&masm);
+
+ // Create the code object.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ // Try to copy the generated code into a heap object.
+ Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop());
+ Object* new_object =
+ Heap::CreateCode(desc, NULL, flags, masm.CodeObject());
+ if (new_object->IsFailure()) return new_object;
+ code = Code::cast(new_object);
+ RecordCodeGeneration(code, &masm);
+
+ if (has_custom_cache()) {
+ SetCustomCache(code);
+ } else {
+ // Try to update the code cache but do not fail if unable.
+ new_object = Heap::code_stubs()->AtNumberPut(GetKey(), code);
+ if (!new_object->IsFailure()) {
+ Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
+ }
+ }
+ }
+
+ return code;
}
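
TryGetCode differs from GetCode in its failure mode: rather than forcing allocation to succeed, it can return a retry-after-GC failure that the caller must propagate. A sketch of the expected calling pattern (the wrapper function is hypothetical):

    // Fetch or lazily compile a stub without crashing on allocation
    // failure; on failure the GC runs and the caller retries.
    static Object* GetCompareStubCode(Condition cc) {
      CompareStub stub(cc, false);        // non-strict comparison
      Object* code = stub.TryGetCode();
      if (code->IsFailure()) return code; // propagate retry-after-GC
      return Code::cast(code);
    }
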
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 25a2d0f..052c1ca 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -37,18 +37,24 @@
V(CallFunction) \
V(GenericBinaryOp) \
V(StringAdd) \
+ V(SubString) \
+ V(StringCompare) \
V(SmiOp) \
V(Compare) \
V(RecordWrite) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
- V(UnarySub) \
+ V(FastNewClosure) \
+ V(FastNewContext) \
+ V(FastCloneShallowArray) \
+ V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
V(Instanceof) \
V(CounterOp) \
V(ArgumentsAccess) \
+ V(RegExpExec) \
V(Runtime) \
V(CEntry) \
V(JSEntry)
@@ -83,6 +89,11 @@
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode();
+  // Retrieve the code for the stub if it has already been generated.
+  // Otherwise do not generate it; instead return a retry-after-GC
+  // failure object.
+ Object* TryGetCode();
+
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
};
@@ -104,9 +115,20 @@
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
+  // Look up the code in the (possibly custom) cache.
+ bool FindCodeInCache(Code** code_out);
+
+ // Nonvirtual wrapper around the stub-specific Generate function. Call
+ // this function to set up the macro assembler and generate the code.
+ void GenerateCode(MacroAssembler* masm);
+
// Generates the assembler code for the stub.
virtual void Generate(MacroAssembler* masm) = 0;
+ // Perform bookkeeping required after code generation when stub code is
+ // initially generated.
+ void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+
// Returns information for computing the number key.
virtual Major MajorKey() = 0;
virtual int MinorKey() = 0;
diff --git a/src/codegen.cc b/src/codegen.cc
index 26e8d7d..fd7e0e8 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -342,11 +342,12 @@
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
- {&CodeGenerator::GenerateMathSin, "_Math_sin"},
- {&CodeGenerator::GenerateMathCos, "_Math_cos"},
{&CodeGenerator::GenerateIsObject, "_IsObject"},
{&CodeGenerator::GenerateIsFunction, "_IsFunction"},
{&CodeGenerator::GenerateStringAdd, "_StringAdd"},
+ {&CodeGenerator::GenerateSubString, "_SubString"},
+ {&CodeGenerator::GenerateStringCompare, "_StringCompare"},
+ {&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
};
@@ -450,6 +451,23 @@
}
+const char* GenericUnaryOpStub::GetName() {
+ switch (op_) {
+ case Token::SUB:
+ return overwrite_
+ ? "GenericUnaryOpStub_SUB_Overwrite"
+ : "GenericUnaryOpStub_SUB_Alloc";
+ case Token::BIT_NOT:
+ return overwrite_
+ ? "GenericUnaryOpStub_BIT_NOT_Overwrite"
+ : "GenericUnaryOpStub_BIT_NOT_Alloc";
+ default:
+ UNREACHABLE();
+ return "<unknown>";
+ }
+}
+
+
void RuntimeStub::Generate(MacroAssembler* masm) {
Runtime::Function* f = Runtime::FunctionForId(id_);
masm->TailCallRuntime(ExternalReference(f),
diff --git a/src/codegen.h b/src/codegen.h
index 85a08d5..76cc491 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -233,6 +233,55 @@
};
+class FastNewClosureStub : public CodeStub {
+ public:
+ void Generate(MacroAssembler* masm);
+
+ private:
+ const char* GetName() { return "FastNewClosureStub"; }
+ Major MajorKey() { return FastNewClosure; }
+ int MinorKey() { return 0; }
+};
+
+
+class FastNewContextStub : public CodeStub {
+ public:
+ static const int kMaximumSlots = 64;
+
+ explicit FastNewContextStub(int slots) : slots_(slots) {
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int slots_;
+
+ const char* GetName() { return "FastNewContextStub"; }
+ Major MajorKey() { return FastNewContext; }
+ int MinorKey() { return slots_; }
+};
+
+
+class FastCloneShallowArrayStub : public CodeStub {
+ public:
+ static const int kMaximumLength = 8;
+
+ explicit FastCloneShallowArrayStub(int length) : length_(length) {
+ ASSERT(length >= 0 && length <= kMaximumLength);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int length_;
+
+ const char* GetName() { return "FastCloneShallowArrayStub"; }
+ Major MajorKey() { return FastCloneShallowArray; }
+ int MinorKey() { return length_; }
+};
+
+
class InstanceofStub: public CodeStub {
public:
InstanceofStub() { }
@@ -245,30 +294,53 @@
};
-class UnarySubStub : public CodeStub {
+class GenericUnaryOpStub : public CodeStub {
public:
- explicit UnarySubStub(bool overwrite)
- : overwrite_(overwrite) { }
+ GenericUnaryOpStub(Token::Value op, bool overwrite)
+ : op_(op), overwrite_(overwrite) { }
private:
+ Token::Value op_;
bool overwrite_;
- Major MajorKey() { return UnarySub; }
- int MinorKey() { return overwrite_ ? 1 : 0; }
+
+ class OverwriteField: public BitField<int, 0, 1> {};
+ class OpField: public BitField<Token::Value, 1, kMinorBits - 1> {};
+
+ Major MajorKey() { return GenericUnaryOp; }
+ int MinorKey() {
+ return OpField::encode(op_) | OverwriteField::encode(overwrite_);
+ }
+
void Generate(MacroAssembler* masm);
- const char* GetName() { return "UnarySubStub"; }
+ const char* GetName();
+};
+
+
+enum NaNInformation {
+ kBothCouldBeNaN,
+ kCantBothBeNaN
};
class CompareStub: public CodeStub {
public:
- CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
+ CompareStub(Condition cc,
+ bool strict,
+ NaNInformation nan_info = kBothCouldBeNaN) :
+ cc_(cc), strict_(strict), never_nan_nan_(nan_info == kCantBothBeNaN) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
+ // Only used for 'equal' comparisons. Tells the stub that we already know
+ // that at least one side of the comparison is not NaN. This allows the
+ // stub to use object identity in the positive case. We ignore it when
+ // generating the minor key for other comparisons to avoid creating more
+ // stubs.
+ bool never_nan_nan_;
Major MajorKey() { return Compare; }
@@ -280,6 +352,9 @@
Register object,
Register scratch);
+ // Unfortunately you have to run without snapshots to see most of these
+ // names in the profile since most compare stubs end up in the snapshot.
+ const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n",
@@ -421,6 +496,84 @@
};
+class RegExpExecStub: public CodeStub {
+ public:
+ RegExpExecStub() { }
+
+ private:
+ Major MajorKey() { return RegExpExec; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "RegExpExecStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RegExpExecStub\n");
+ }
+#endif
+};
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
+ : argc_(argc), in_loop_(in_loop), flags_(flags) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+ CallFunctionFlags flags_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
+ argc_,
+ static_cast<int>(in_loop_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+  // Minor key encoding in 31 bits AAAAAAAAAAAAAAAAAAAAAFI: A(rgc) F(lag) I(n loop).
+ class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
+ class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
+ class ArgcBits: public BitField<int, 2, 29> {};
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() {
+ // Encode the parameters in a unique 31 bit value.
+ return InLoopBits::encode(in_loop_)
+ | FlagBits::encode(flags_)
+ | ArgcBits::encode(argc_);
+ }
+
+ InLoopFlag InLoop() { return in_loop_; }
+ bool ReceiverMightBeValue() {
+ return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
+ }
+
+ public:
+ static int ExtractArgcFromMinorKey(int minor_key) {
+ return ArgcBits::decode(minor_key);
+ }
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ ToBooleanStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return 0; }
+};
+
+
} // namespace internal
} // namespace v8
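
Several of the stubs above pack their parameters into the 31-bit minor key with BitField, CallFunctionStub being the densest example. An illustrative round trip through that encoding (ignoring that the BitField helpers are private; IN_LOOP and RECEIVER_MIGHT_BE_VALUE are enum values assumed from the surrounding codebase):

    // Encode: bit 0 = in_loop, bit 1 = flags, bits 2..30 = argc.
    int minor_key = InLoopBits::encode(IN_LOOP)
                  | FlagBits::encode(RECEIVER_MIGHT_BE_VALUE)
                  | ArgcBits::encode(3);

    // Decode only the argument count, as the public helper does:
    int argc = CallFunctionStub::ExtractArgcFromMinorKey(minor_key);  // 3
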
diff --git a/src/compiler.cc b/src/compiler.cc
index 48da63d..b7aaedf 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -46,23 +46,11 @@
public:
enum CodeGenTag { NORMAL, FAST };
- CodeGenSelector()
- : has_supported_syntax_(true),
- context_(Expression::kUninitialized) {
- }
+ CodeGenSelector() : has_supported_syntax_(true) {}
CodeGenTag Select(FunctionLiteral* fun);
private:
- // Visit an expression in a given expression context.
- void ProcessExpression(Expression* expr, Expression::Context context) {
- Expression::Context saved = context_;
- context_ = context;
- Visit(expr);
- expr->set_context(context);
- context_ = saved;
- }
-
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
@@ -73,9 +61,6 @@
bool has_supported_syntax_;
- // The desired expression context of the currently visited expression.
- Expression::Context context_;
-
DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
};
@@ -536,7 +521,7 @@
LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
#ifdef ENABLE_OPROFILE_AGENT
- OProfileAgent::CreateNativeCodeRegion(*node->name(),
+ OProfileAgent::CreateNativeCodeRegion(*literal->name(),
code->instruction_start(),
code->instruction_size());
#endif
@@ -647,18 +632,12 @@
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
Property* prop = decl->proxy()->AsProperty();
if (prop != NULL) {
- // Property rewrites are shared, ensure we are not changing its
- // expression context state.
- ASSERT(prop->obj()->context() == Expression::kUninitialized ||
- prop->obj()->context() == Expression::kValue);
- ASSERT(prop->key()->context() == Expression::kUninitialized ||
- prop->key()->context() == Expression::kValue);
- ProcessExpression(prop->obj(), Expression::kValue);
- ProcessExpression(prop->key(), Expression::kValue);
+ Visit(prop->obj());
+ Visit(prop->key());
}
if (decl->fun() != NULL) {
- ProcessExpression(decl->fun(), Expression::kValue);
+ Visit(decl->fun());
}
}
@@ -669,17 +648,15 @@
void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
- ProcessExpression(stmt->expression(), Expression::kEffect);
+ Visit(stmt->expression());
}
-void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
- // EmptyStatement is supported.
-}
+void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {}
void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
- ProcessExpression(stmt->condition(), Expression::kTest);
+ Visit(stmt->condition());
CHECK_BAILOUT;
Visit(stmt->then_statement());
CHECK_BAILOUT;
@@ -687,29 +664,23 @@
}
-void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
- BAILOUT("ContinueStatement");
-}
+void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {}
-void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
- BAILOUT("BreakStatement");
-}
+void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {}
void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
- ProcessExpression(stmt->expression(), Expression::kValue);
+ Visit(stmt->expression());
}
void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
- BAILOUT("WithEnterStatement");
+ Visit(stmt->expression());
}
-void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
- BAILOUT("WithExitStatement");
-}
+void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {}
void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
@@ -718,39 +689,21 @@
void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
- // We do not handle loops with breaks or continue statements in their
- // body. We will bailout when we hit those statements in the body.
- ProcessExpression(stmt->cond(), Expression::kTest);
+ Visit(stmt->cond());
CHECK_BAILOUT;
Visit(stmt->body());
}
void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
- // We do not handle loops with breaks or continue statements in their
- // body. We will bailout when we hit those statements in the body.
- ProcessExpression(stmt->cond(), Expression::kTest);
+ Visit(stmt->cond());
CHECK_BAILOUT;
Visit(stmt->body());
}
void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
- // We do not handle loops with breaks or continue statements in their
- // body. We will bailout when we hit those statements in the body.
- if (stmt->init() != NULL) {
- Visit(stmt->init());
- CHECK_BAILOUT;
- }
- if (stmt->cond() != NULL) {
- ProcessExpression(stmt->cond(), Expression::kTest);
- CHECK_BAILOUT;
- }
- Visit(stmt->body());
- if (stmt->next() != NULL) {
- CHECK_BAILOUT;
- Visit(stmt->next());
- }
+ BAILOUT("ForStatement");
}
@@ -760,23 +713,23 @@
void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
- BAILOUT("TryCatchStatement");
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->catch_block());
}
void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- BAILOUT("TryFinallyStatement");
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->finally_block());
}
-void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {
- // Debugger statement is supported.
-}
+void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {}
-void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
- // Function literal is supported.
-}
+void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {}
void CodeGenSelector::VisitFunctionBoilerplateLiteral(
@@ -786,11 +739,11 @@
void CodeGenSelector::VisitConditional(Conditional* expr) {
- ProcessExpression(expr->condition(), Expression::kTest);
+ Visit(expr->condition());
CHECK_BAILOUT;
- ProcessExpression(expr->then_expression(), context_);
+ Visit(expr->then_expression());
CHECK_BAILOUT;
- ProcessExpression(expr->else_expression(), context_);
+ Visit(expr->else_expression());
}
@@ -800,11 +753,9 @@
void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
- Expression* rewrite = expr->var()->rewrite();
- // A rewrite of NULL indicates a global variable.
- if (rewrite != NULL) {
- // Non-global.
- Slot* slot = rewrite->AsSlot();
+ Variable* var = expr->var();
+ if (!var->is_global()) {
+ Slot* slot = var->slot();
if (slot != NULL) {
Slot::Type type = slot->type();
// When LOOKUP slots are enabled, some currently dead code
@@ -813,10 +764,10 @@
BAILOUT("Lookup slot");
}
} else {
+ // If not global or a slot, it is a parameter rewritten to an explicit
+ // property reference on the (shadow) arguments object.
#ifdef DEBUG
- // Only remaining possibility is a property where the object is
- // a slotted variable and the key is a smi.
- Property* property = rewrite->AsProperty();
+ Property* property = var->AsProperty();
ASSERT_NOT_NULL(property);
Variable* object = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object);
@@ -829,14 +780,10 @@
}
-void CodeGenSelector::VisitLiteral(Literal* expr) {
- /* Nothing to do. */
-}
+void CodeGenSelector::VisitLiteral(Literal* expr) {}
-void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
- /* Nothing to do. */
-}
+void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {}
void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
@@ -845,31 +792,9 @@
for (int i = 0, len = properties->length(); i < len; i++) {
ObjectLiteral::Property* property = properties->at(i);
if (property->IsCompileTimeValue()) continue;
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- UNREACHABLE();
-
- // For (non-compile-time) materialized literals and computed
- // properties with symbolic keys we will use an IC and therefore not
- // generate code for the key.
- case ObjectLiteral::Property::COMPUTED: // Fall through.
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- if (property->key()->handle()->IsSymbol()) {
- break;
- }
- // Fall through.
-
- // In all other cases we need the key's value on the stack
- // for a runtime call. (Relies on TEMP meaning STACK.)
- case ObjectLiteral::Property::GETTER: // Fall through.
- case ObjectLiteral::Property::SETTER: // Fall through.
- case ObjectLiteral::Property::PROTOTYPE:
- ProcessExpression(property->key(), Expression::kValue);
- CHECK_BAILOUT;
- break;
- }
- ProcessExpression(property->value(), Expression::kValue);
+ Visit(property->key());
+ CHECK_BAILOUT;
+ Visit(property->value());
CHECK_BAILOUT;
}
}
@@ -881,14 +806,16 @@
Expression* subexpr = subexprs->at(i);
if (subexpr->AsLiteral() != NULL) continue;
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- ProcessExpression(subexpr, Expression::kValue);
+ Visit(subexpr);
CHECK_BAILOUT;
}
}
void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
+ Visit(expr->key());
+ CHECK_BAILOUT;
+ Visit(expr->value());
}
@@ -897,14 +824,14 @@
// non-context (stack-allocated) locals, and global variables.
Token::Value op = expr->op();
if (op == Token::INIT_CONST) BAILOUT("initialize constant");
- if (op != Token::ASSIGN && op != Token::INIT_VAR) {
- BAILOUT("compound assignment");
- }
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
+ if (var->mode() == Variable::CONST) {
+ BAILOUT("Assignment to const");
+ }
// All global variables are supported.
if (!var->is_global()) {
ASSERT(var->slot() != NULL);
@@ -914,41 +841,28 @@
}
}
} else if (prop != NULL) {
- ASSERT(prop->obj()->context() == Expression::kUninitialized ||
- prop->obj()->context() == Expression::kValue);
- ProcessExpression(prop->obj(), Expression::kValue);
+ Visit(prop->obj());
CHECK_BAILOUT;
- // We will only visit the key during code generation for keyed property
- // stores. Leave its expression context uninitialized for named
- // property stores.
- Literal* lit = prop->key()->AsLiteral();
- uint32_t ignored;
- if (lit == NULL ||
- !lit->handle()->IsSymbol() ||
- String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
- ASSERT(prop->key()->context() == Expression::kUninitialized ||
- prop->key()->context() == Expression::kValue);
- ProcessExpression(prop->key(), Expression::kValue);
- CHECK_BAILOUT;
- }
+ Visit(prop->key());
+ CHECK_BAILOUT;
} else {
// This is a throw reference error.
BAILOUT("non-variable/non-property assignment");
}
- ProcessExpression(expr->value(), Expression::kValue);
+ Visit(expr->value());
}
void CodeGenSelector::VisitThrow(Throw* expr) {
- BAILOUT("Throw");
+ Visit(expr->exception());
}
void CodeGenSelector::VisitProperty(Property* expr) {
- ProcessExpression(expr->obj(), Expression::kValue);
+ Visit(expr->obj());
CHECK_BAILOUT;
- ProcessExpression(expr->key(), Expression::kValue);
+ Visit(expr->key());
}
@@ -967,35 +881,29 @@
BAILOUT("call to a lookup slot");
} else if (fun->AsProperty() != NULL) {
Property* prop = fun->AsProperty();
- Literal* literal_key = prop->key()->AsLiteral();
- if (literal_key != NULL && literal_key->handle()->IsSymbol()) {
- ProcessExpression(prop->obj(), Expression::kValue);
- CHECK_BAILOUT;
- } else {
- ProcessExpression(prop->obj(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(prop->key(), Expression::kValue);
- CHECK_BAILOUT;
- }
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
} else {
// Otherwise the call is supported if the function expression is.
- ProcessExpression(fun, Expression::kValue);
+ Visit(fun);
}
// Check all arguments to the call.
for (int i = 0; i < args->length(); i++) {
- ProcessExpression(args->at(i), Expression::kValue);
+ Visit(args->at(i));
CHECK_BAILOUT;
}
}
void CodeGenSelector::VisitCallNew(CallNew* expr) {
- ProcessExpression(expr->expression(), Expression::kValue);
+ Visit(expr->expression());
CHECK_BAILOUT;
ZoneList<Expression*>* args = expr->arguments();
// Check all arguments to the call
for (int i = 0; i < args->length(); i++) {
- ProcessExpression(args->at(i), Expression::kValue);
+ Visit(args->at(i));
CHECK_BAILOUT;
}
}
@@ -1009,7 +917,7 @@
}
// Check all arguments to the call. (Relies on TEMP meaning STACK.)
for (int i = 0; i < expr->arguments()->length(); i++) {
- ProcessExpression(expr->arguments()->at(i), Expression::kValue);
+ Visit(expr->arguments()->at(i));
CHECK_BAILOUT;
}
}
@@ -1018,113 +926,60 @@
void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::VOID:
- ProcessExpression(expr->expression(), Expression::kEffect);
- break;
case Token::NOT:
- ProcessExpression(expr->expression(), Expression::kTest);
- break;
case Token::TYPEOF:
- ProcessExpression(expr->expression(), Expression::kValue);
+ Visit(expr->expression());
break;
+ case Token::BIT_NOT:
+ BAILOUT("UnaryOperataion: BIT_NOT");
+ case Token::DELETE:
+ BAILOUT("UnaryOperataion: DELETE");
default:
- BAILOUT("UnaryOperation");
+ BAILOUT("UnaryOperataion");
}
}
void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
- // We support postfix count operations on global variables.
- if (expr->is_prefix()) BAILOUT("Prefix CountOperation");
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (var == NULL || !var->is_global()) BAILOUT("non-global postincrement");
- ProcessExpression(expr->expression(), Expression::kValue);
-}
-
-
-void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
- switch (expr->op()) {
- case Token::COMMA:
- ProcessExpression(expr->left(), Expression::kEffect);
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), context_);
- break;
-
- case Token::OR:
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kTestValue:
- // The left subexpression's value is not needed, it is in a pure
- // test context.
- ProcessExpression(expr->left(), Expression::kTest);
- break;
- case Expression::kValue: // Fall through.
- case Expression::kValueTest:
- // The left subexpression's value is needed, it is in a hybrid
- // value/test context.
- ProcessExpression(expr->left(), Expression::kValueTest);
- break;
+ Property* prop = expr->expression()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("CountOperation with lookup slot");
}
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), context_);
- break;
-
- case Token::AND:
- switch (context_) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kValueTest:
- // The left subexpression's value is not needed, it is in a pure
- // test context.
- ProcessExpression(expr->left(), Expression::kTest);
- break;
- case Expression::kValue: // Fall through.
- case Expression::kTestValue:
- // The left subexpression's value is needed, it is in a hybrid
- // test/value context.
- ProcessExpression(expr->left(), Expression::kTestValue);
- break;
- }
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), context_);
- break;
-
- case Token::ADD:
- case Token::SUB:
- case Token::DIV:
- case Token::MOD:
- case Token::MUL:
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SHL:
- case Token::SHR:
- case Token::SAR:
- ProcessExpression(expr->left(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), Expression::kValue);
- break;
-
- default:
- BAILOUT("Unsupported binary operation");
+ }
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ CHECK_BAILOUT;
+ Visit(prop->key());
+ CHECK_BAILOUT;
+ } else {
+ // This is a throw reference error.
+ BAILOUT("CountOperation non-variable/non-property expression");
}
}
+void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
+}
+
+
void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
- ProcessExpression(expr->left(), Expression::kValue);
- CHECK_BAILOUT;
- ProcessExpression(expr->right(), Expression::kValue);
+ Visit(expr->left());
+ CHECK_BAILOUT;
+ Visit(expr->right());
}
-void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
- // ThisFunction is supported.
-}
+void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {}
#undef BAILOUT
#undef CHECK_BAILOUT
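
The selector's control flow rests on the BAILOUT and CHECK_BAILOUT macros undefined just above; their definitions are not part of this hunk. A sketch of their assumed shape, consistent with how the visitor methods use them (record unsupported syntax, then unwind):

    // Assumed definitions, not shown in this diff:
    #define BAILOUT(reason)                   \
      do {                                    \
        has_supported_syntax_ = false;        \
        return;                               \
      } while (false)

    #define CHECK_BAILOUT                     \
      do {                                    \
        if (!has_supported_syntax_) return;   \
      } while (false)
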
diff --git a/src/contexts.cc b/src/contexts.cc
index ead73ee..19920d2 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -52,11 +52,14 @@
if (global()->IsGlobalObject()) {
return global()->global_context();
}
+
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the global context.
+ ASSERT(Bootstrapper::IsActive());
Context* current = this;
while (!current->IsGlobalContext()) {
- current = Context::cast(JSFunction::cast(current->closure())->context());
+ JSFunction* closure = JSFunction::cast(current->closure());
+ current = Context::cast(closure->context());
}
return current;
}
diff --git a/src/contexts.h b/src/contexts.h
index bdfc40b..66c1575 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -77,6 +77,7 @@
V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
@@ -202,6 +203,7 @@
TO_UINT32_FUN_INDEX,
TO_INT32_FUN_INDEX,
TO_BOOLEAN_FUN_INDEX,
+ GLOBAL_EVAL_FUN_INDEX,
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
SPECIAL_FUNCTION_TABLE_INDEX,
diff --git a/src/date-delay.js b/src/date-delay.js
index 0778dc9..7d8f458 100644
--- a/src/date-delay.js
+++ b/src/date-delay.js
@@ -45,12 +45,6 @@
throw new $TypeError('this is not a Date object.');
}
-// ECMA 262 - 15.9.1.2
-function Day(time) {
- return FLOOR(time / msPerDay);
-}
-
-
// ECMA 262 - 5.2
function Modulo(value, remainder) {
var mod = value % remainder;
@@ -86,30 +80,13 @@
}
-function YearFromTime(time) {
- return FromJulianDay(Day(time) + kDayZeroInJulianDay).year;
-}
-
-
function InLeapYear(time) {
- return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
-}
-
-
-// ECMA 262 - 15.9.1.4
-function MonthFromTime(time) {
- return FromJulianDay(Day(time) + kDayZeroInJulianDay).month;
+ return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0;
}
function DayWithinYear(time) {
- return Day(time) - DayFromYear(YearFromTime(time));
-}
-
-
-// ECMA 262 - 15.9.1.5
-function DateFromTime(time) {
- return FromJulianDay(Day(time) + kDayZeroInJulianDay).date;
+ return DAY(time) - DayFromYear(YEAR_FROM_TIME(time));
}
@@ -136,7 +113,7 @@
// we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
- var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
+ var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
return TimeClip(MakeDate(day, TimeWithinDay(t)));
}
@@ -232,7 +209,7 @@
function WeekDay(time) {
- return Modulo(Day(time) + 4, 7);
+ return Modulo(DAY(time) + 4, 7);
}
var local_time_offset = %DateLocalTimeOffset();
@@ -243,7 +220,14 @@
}
function LocalTimeNoCheck(time) {
- return time + local_time_offset + DaylightSavingsOffset(time);
+ // Inline the DST offset cache checks for speed.
+ var cache = DST_offset_cache;
+ if (cache.start <= time && time <= cache.end) {
+ var dst_offset = cache.offset;
+ } else {
+ var dst_offset = DaylightSavingsOffset(time);
+ }
+ return time + local_time_offset + dst_offset;
}
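The rewritten LocalTimeNoCheck trades a call to DaylightSavingsOffset for two comparisons on the hot path. A standalone C++ model of the same lookup, with field names taken from the JS cache object and ComputeDstOffsetMs as a hypothetical stand-in for the runtime call:

    struct DstCache { double start, end, offset; };  // Cached [start, end] interval.

    static double ComputeDstOffsetMs(double time_ms) { return 0.0; }  // Stand-in.

    static double LocalTimeNoCheckModel(double time_ms, const DstCache& cache,
                                        double local_offset_ms) {
      double dst = (cache.start <= time_ms && time_ms <= cache.end)
                       ? cache.offset                  // Fast path: cache hit.
                       : ComputeDstOffsetMs(time_ms);  // Slow path: recompute.
      return time_ms + local_offset_ms + dst;
    }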
@@ -254,27 +238,6 @@
}
-// ECMA 262 - 15.9.1.10
-function HourFromTime(time) {
- return Modulo(FLOOR(time / msPerHour), HoursPerDay);
-}
-
-
-function MinFromTime(time) {
- return Modulo(FLOOR(time / msPerMinute), MinutesPerHour);
-}
-
-
-function SecFromTime(time) {
- return Modulo(FLOOR(time / msPerSecond), SecondsPerMinute);
-}
-
-
-function msFromTime(time) {
- return Modulo(time, msPerSecond);
-}
-
-
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
if (!$isFinite(hour)) return $NaN;
@@ -468,7 +431,7 @@
value = DateParse(year);
if (!NUMBER_IS_NAN(value)) {
cache.time = value;
- cache.year = YearFromTime(LocalTimeNoCheck(value));
+ cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value));
cache.string = year;
}
}
@@ -508,60 +471,59 @@
return DATE_VALUE(aDate);
}
-
function GetMillisecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return msFromTime(LocalTimeNoCheck(t));
+ return MS_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMillisecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return msFromTime(t);
+ return MS_FROM_TIME(t);
}
function GetSecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return SecFromTime(LocalTimeNoCheck(t));
+ return SEC_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCSecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return SecFromTime(t);
+ return SEC_FROM_TIME(t);
}
function GetMinutesFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MinFromTime(LocalTimeNoCheck(t));
+ return MIN_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMinutesFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MinFromTime(t);
+ return MIN_FROM_TIME(t);
}
function GetHoursFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return HourFromTime(LocalTimeNoCheck(t));
+ return HOUR_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCHoursFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return HourFromTime(t);
+ return HOUR_FROM_TIME(t);
}
@@ -570,42 +532,42 @@
if (NUMBER_IS_NAN(t)) return t;
var cache = Date_cache;
if (cache.time === t) return cache.year;
- return YearFromTime(LocalTimeNoCheck(t));
+ return YEAR_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCFullYearFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return YearFromTime(t);
+ return YEAR_FROM_TIME(t);
}
function GetMonthFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MonthFromTime(LocalTimeNoCheck(t));
+ return MONTH_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMonthFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MonthFromTime(t);
+ return MONTH_FROM_TIME(t);
}
function GetDateFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return DateFromTime(LocalTimeNoCheck(t));
+ return DATE_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCDateFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return DateFromTime(t);
+ return DATE_FROM_TIME(t);
}
@@ -622,7 +584,7 @@
function DateString(time) {
- var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
+ var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return WeekDays[WeekDay(time)] + ' '
+ Months[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ' '
@@ -635,7 +597,7 @@
function LongDateString(time) {
- var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
+ var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return LongWeekDays[WeekDay(time)] + ', '
+ LongMonths[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ', '
@@ -644,9 +606,9 @@
function TimeString(time) {
- return TwoDigitString(HourFromTime(time)) + ':'
- + TwoDigitString(MinFromTime(time)) + ':'
- + TwoDigitString(SecFromTime(time));
+ return TwoDigitString(HOUR_FROM_TIME(time)) + ':'
+ + TwoDigitString(MIN_FROM_TIME(time)) + ':'
+ + TwoDigitString(SEC_FROM_TIME(time));
}
@@ -892,8 +854,8 @@
function DateSetMilliseconds(ms) {
var t = LocalTime(DATE_VALUE(this));
ms = ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -901,8 +863,8 @@
function DateSetUTCMilliseconds(ms) {
var t = DATE_VALUE(this);
ms = ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -911,8 +873,8 @@
var t = LocalTime(DATE_VALUE(this));
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -921,8 +883,8 @@
var t = DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -933,8 +895,8 @@
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
ms = argc < 3 ? GetMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), min, sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -945,8 +907,8 @@
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
ms = argc < 3 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), min, sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -959,7 +921,7 @@
sec = argc < 3 ? GetSecondsFrom(this) : ToNumber(sec);
ms = argc < 4 ? GetMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -972,7 +934,7 @@
sec = argc < 3 ? GetUTCSecondsFrom(this) : ToNumber(sec);
ms = argc < 4 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -980,7 +942,7 @@
function DateSetDate(date) {
var t = LocalTime(DATE_VALUE(this));
date = ToNumber(date);
- var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+ var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -989,7 +951,7 @@
function DateSetUTCDate(date) {
var t = DATE_VALUE(this);
date = ToNumber(date);
- var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+ var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -999,7 +961,7 @@
var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
- var day = MakeDay(YearFromTime(t), month, date);
+ var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -1009,7 +971,7 @@
var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
- var day = MakeDay(YearFromTime(t), month, date);
+ var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -1020,8 +982,8 @@
t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
- month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
- date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+ month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
+ date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -1033,8 +995,8 @@
if (NUMBER_IS_NAN(t)) t = 0;
var argc = %_ArgumentsLength();
year = ToNumber(year);
- month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
- date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+ month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
+ date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -1046,9 +1008,9 @@
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
return WeekDays[WeekDay(t)] + ', '
- + TwoDigitString(DateFromTime(t)) + ' '
- + Months[MonthFromTime(t)] + ' '
- + YearFromTime(t) + ' '
+ + TwoDigitString(DATE_FROM_TIME(t)) + ' '
+ + Months[MONTH_FROM_TIME(t)] + ' '
+ + YEAR_FROM_TIME(t) + ' '
+ TimeString(t) + ' GMT';
}
@@ -1057,7 +1019,7 @@
function DateGetYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return $NaN;
- return YearFromTime(LocalTimeNoCheck(t)) - 1900;
+ return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900;
}
@@ -1069,7 +1031,7 @@
if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+ var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -1086,16 +1048,19 @@
}
-function PadInt(n) {
- // Format integers to have at least two digits.
- return n < 10 ? '0' + n : n;
+function PadInt(n, digits) {
+ if (digits == 1) return n;
+ return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
}
function DateToISOString() {
- return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1) +
- '-' + PadInt(this.getUTCDate()) + 'T' + PadInt(this.getUTCHours()) +
- ':' + PadInt(this.getUTCMinutes()) + ':' + PadInt(this.getUTCSeconds()) +
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
+ return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) +
+ '-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) +
+ ':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) +
+ '.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
}
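The widened PadInt recurses on the digit count so DateToISOString can zero-pad milliseconds to three places. The same recursion rendered as self-contained C++, purely for illustration:

    #include <cmath>
    #include <string>

    // Prepend '0' until n fills `digits` places; assumes n >= 0, digits >= 1.
    static std::string PadInt(int n, int digits) {
      if (digits == 1) return std::to_string(n);
      if (n < static_cast<int>(std::pow(10.0, digits - 1))) {
        return "0" + PadInt(n, digits - 1);
      }
      return std::to_string(n);
    }
    // PadInt(7, 3) == "007", PadInt(42, 3) == "042", PadInt(427, 3) == "427".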
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 0701382..41151d8 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -54,10 +54,12 @@
while (!bound && !terminate_) {
bound = server_->Bind(port_);
- // If an error occoured wait a bit before retrying. The most common error
+ // If an error occurred, wait a bit before retrying. The most common error
// would be that the port is already in use, so this avoids a busy loop and
// makes the agent take over the port when it becomes free.
if (!bound) {
+ PrintF("Failed to open socket on port %d, "
+ "waiting %d ms before retrying\n", port_, kOneSecondInMicros / 1000);
terminate_now_->Wait(kOneSecondInMicros);
}
}
diff --git a/src/debug.cc b/src/debug.cc
index 2c4552e..34b3a6d 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1241,12 +1241,14 @@
uint32_t key = Smi::cast(*obj)->value();
// Argc in the stub is the number of arguments passed - not the
// expected arguments of the called function.
- int call_function_arg_count = CodeStub::MinorKeyFromKey(key);
+ int call_function_arg_count =
+ CallFunctionStub::ExtractArgcFromMinorKey(
+ CodeStub::MinorKeyFromKey(key));
ASSERT(call_function_stub->major_key() ==
CodeStub::MajorKeyFromKey(key));
// Find target function on the expression stack.
- // Expression stack lools like this (top to bottom):
+ // Expression stack looks like this (top to bottom):
// argN
// ...
// arg0
@@ -1759,8 +1761,10 @@
v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
bool Debugger::debugger_unload_pending_ = false;
v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
+Mutex* Debugger::dispatch_handler_access_ = OS::CreateMutex();
v8::Debug::DebugMessageDispatchHandler
Debugger::debug_message_dispatch_handler_ = NULL;
+MessageDispatchHelperThread* Debugger::message_dispatch_helper_thread_ = NULL;
int Debugger::host_dispatch_micros_ = 100 * 1000;
DebuggerAgent* Debugger::agent_ = NULL;
LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
@@ -2379,17 +2383,12 @@
if (IsDebuggerActive()) {
// Disable the compilation cache when the debugger is active.
CompilationCache::Disable();
+ debugger_unload_pending_ = false;
} else {
CompilationCache::Enable();
-
// Unload the debugger if the event listener and message handler are cleared.
- if (Debug::InDebugger()) {
- // If we are in debugger set the flag to unload the debugger when last
- // EnterDebugger on the current stack is destroyed.
- debugger_unload_pending_ = true;
- } else {
- UnloadDebugger();
- }
+ // Schedule this for later, because we may be in a non-V8 thread.
+ debugger_unload_pending_ = true;
}
}
@@ -2402,8 +2401,14 @@
void Debugger::SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler) {
+ v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
+ ScopedLock with(dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
+
+ if (provide_locker && message_dispatch_helper_thread_ == NULL) {
+ message_dispatch_helper_thread_ = new MessageDispatchHelperThread;
+ message_dispatch_helper_thread_->Start();
+ }
}
@@ -2438,8 +2443,16 @@
StackGuard::DebugCommand();
}
- if (Debugger::debug_message_dispatch_handler_ != NULL) {
- Debugger::debug_message_dispatch_handler_();
+ MessageDispatchHelperThread* dispatch_thread;
+ {
+ ScopedLock with(dispatch_handler_access_);
+ dispatch_thread = message_dispatch_helper_thread_;
+ }
+
+ if (dispatch_thread == NULL) {
+ CallMessageDispatchHandler();
+ } else {
+ dispatch_thread->Schedule();
}
}
@@ -2483,7 +2496,24 @@
}
-bool Debugger::StartAgent(const char* name, int port) {
+static void StubMessageHandler2(const v8::Debug::Message& message) {
+ // Simply ignore the message.
+}
+
+
+bool Debugger::StartAgent(const char* name, int port,
+ bool wait_for_connection) {
+ if (wait_for_connection) {
+ // Suspend V8 if it is already running or set V8 to suspend whenever
+ // it starts.
+ // Provide a stub message handler; without one, V8 auto-continues each
+ // suspend, which is not wanted here.
+ // Once suspended, V8 stays suspended indefinitely until a remote
+ // debugger connects and issues a "continue" command.
+ Debugger::message_handler_ = StubMessageHandler2;
+ v8::Debug::DebugBreak();
+ }
+
if (Socket::Setup()) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();
@@ -2509,6 +2539,19 @@
agent_->WaitUntilListening();
}
+
+void Debugger::CallMessageDispatchHandler() {
+ v8::Debug::DebugMessageDispatchHandler handler;
+ {
+ ScopedLock with(dispatch_handler_access_);
+ handler = Debugger::debug_message_dispatch_handler_;
+ }
+ if (handler != NULL) {
+ handler();
+ }
+}
+
+
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
@@ -2729,6 +2772,45 @@
queue_.Clear();
}
+
+MessageDispatchHelperThread::MessageDispatchHelperThread()
+ : sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
+ already_signalled_(false) {
+}
+
+
+MessageDispatchHelperThread::~MessageDispatchHelperThread() {
+ delete mutex_;
+ delete sem_;
+}
+
+
+void MessageDispatchHelperThread::Schedule() {
+ {
+ ScopedLock lock(mutex_);
+ if (already_signalled_) {
+ return;
+ }
+ already_signalled_ = true;
+ }
+ sem_->Signal();
+}
+
+
+void MessageDispatchHelperThread::Run() {
+ while (true) {
+ sem_->Wait();
+ {
+ ScopedLock lock(mutex_);
+ already_signalled_ = false;
+ }
+ {
+ Locker locker;
+ Debugger::CallMessageDispatchHandler();
+ }
+ }
+}
+
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
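MessageDispatchHelperThread coalesces bursts of Schedule() calls into a single wakeup: the flag flips under the mutex, the semaphore is signalled at most once per batch, and Run() clears the flag before doing the work. A minimal sketch of the same pattern on standard C++ primitives (the real code uses V8's OS::CreateSemaphore/CreateMutex wrappers):

    #include <condition_variable>
    #include <mutex>

    class CoalescingWorker {
     public:
      void Schedule() {
        std::lock_guard<std::mutex> lock(mutex_);
        if (pending_) return;   // Already signalled: merge with the batch.
        pending_ = true;
        cv_.notify_one();
      }

      void Run() {              // Body of the helper thread.
        for (;;) {
          {
            std::unique_lock<std::mutex> lock(mutex_);
            cv_.wait(lock, [this] { return pending_; });
            pending_ = false;   // Clear before working, like already_signalled_.
          }
          DispatchPendingMessages();  // Stand-in for CallMessageDispatchHandler.
        }
      }

     private:
      void DispatchPendingMessages() {}
      std::mutex mutex_;
      std::condition_variable cv_;
      bool pending_ = false;
    };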
diff --git a/src/debug.h b/src/debug.h
index 24f0db4..5ea2e52 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -559,6 +559,9 @@
};
+class MessageDispatchHelperThread;
+
+
// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
// messages. The message data is not managed by LockingCommandMessageQueue.
// Pointers to the data are passed in and out. Implemented by adding a
@@ -619,7 +622,8 @@
static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period);
static void SetDebugMessageDispatchHandler(
- v8::Debug::DebugMessageDispatchHandler handler);
+ v8::Debug::DebugMessageDispatchHandler handler,
+ bool provide_locker);
// Invoke the message handler function.
static void InvokeMessageHandler(MessageImpl message);
@@ -636,7 +640,8 @@
bool* pending_exception);
// Start the debugger agent listening on the provided port.
- static bool StartAgent(const char* name, int port);
+ static bool StartAgent(const char* name, int port,
+ bool wait_for_connection = false);
// Stop the debugger agent.
static void StopAgent();
@@ -644,6 +649,8 @@
// Blocks until the agent has started listening for connections
static void WaitForAgent();
+ static void CallMessageDispatchHandler();
+
// Unload the debugger if possible. Only called when no debugger is currently
// active.
static void UnloadDebugger();
@@ -653,7 +660,9 @@
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
- UnloadDebugger();
+ if (Debug::debugger_entry() == NULL) {
+ UnloadDebugger();
+ }
}
// Currently argument event is not used.
@@ -680,7 +689,9 @@
static v8::Debug::MessageHandler2 message_handler_;
static bool debugger_unload_pending_; // Was message handler cleared?
static v8::Debug::HostDispatchHandler host_dispatch_handler_;
+ static Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
static v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
+ static MessageDispatchHelperThread* message_dispatch_helper_thread_;
static int host_dispatch_micros_;
static DebuggerAgent* agent_;
@@ -857,6 +868,27 @@
int reg_;
};
+// The optional thread that the debug agent may use to temporarily call V8
+// to process pending debug requests if the debuggee is not running V8 at
+// the moment. Technically it does not call V8 itself; rather, it asks the
+// embedding program to do this via the registered
+// v8::Debug::DebugMessageDispatchHandler.
+class MessageDispatchHelperThread: public Thread {
+ public:
+ MessageDispatchHelperThread();
+ ~MessageDispatchHelperThread();
+
+ void Schedule();
+
+ private:
+ void Run();
+
+ Semaphore* const sem_;
+ Mutex* const mutex_;
+ bool already_signalled_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
+};
+
} } // namespace v8::internal
diff --git a/src/execution.cc b/src/execution.cc
index 2f646a5..a79af23 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "api.h"
+#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "simulator.h"
@@ -78,6 +79,10 @@
receiver = Handle<JSObject>(global->global_receiver());
}
+ // Make sure that the global object of the context we're about to
+ // make the current one is indeed a global object.
+ ASSERT(func->context()->global()->IsGlobalObject());
+
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
@@ -607,6 +612,11 @@
return Heap::undefined_value();
}
+ // Ignore debug break during bootstrapping.
+ if (Bootstrapper::IsActive()) {
+ return Heap::undefined_value();
+ }
+
{
JavaScriptFrameIterator it;
ASSERT(!it.done());
@@ -628,24 +638,32 @@
bool debug_command_only =
StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
- // Clear the debug request flags.
+ // Clear the debug break request flag.
StackGuard::Continue(DEBUGBREAK);
+
+ ProcessDebugMesssages(debug_command_only);
+
+ // Return to continue execution.
+ return Heap::undefined_value();
+}
+
+void Execution::ProcessDebugMesssages(bool debug_command_only) {
+ // Clear the debug command request flag.
StackGuard::Continue(DEBUGCOMMAND);
HandleScope scope;
// Enter the debugger. Just continue if we fail to enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
- return Heap::undefined_value();
+ return;
}
// Notify the debug event listeners. Indicate auto continue if the break was
// a debug command break.
Debugger::OnDebugBreak(Factory::undefined_value(), debug_command_only);
-
- // Return to continue execution.
- return Heap::undefined_value();
}
+
+
#endif
Object* Execution::HandleStackGuardInterrupt() {
diff --git a/src/execution.h b/src/execution.h
index 52198c4..10683d6 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -122,6 +122,7 @@
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* DebugBreakHelper();
+ static void ProcessDebugMesssages(bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual
diff --git a/src/factory.cc b/src/factory.cc
index 83775ef..8d20749 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -284,7 +284,8 @@
Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Map> function_map) {
+ Handle<Map> function_map,
+ PretenureFlag pretenure) {
ASSERT(boilerplate->IsBoilerplate());
ASSERT(!boilerplate->has_initial_map());
ASSERT(!boilerplate->has_prototype());
@@ -292,20 +293,22 @@
ASSERT(boilerplate->elements() == Heap::empty_fixed_array());
CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map,
boilerplate->shared(),
- Heap::the_hole_value()),
+ Heap::the_hole_value(),
+ pretenure),
JSFunction);
}
Handle<JSFunction> Factory::NewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Context> context) {
- Handle<JSFunction> result =
- BaseNewFunctionFromBoilerplate(boilerplate, Top::function_map());
+ Handle<Context> context,
+ PretenureFlag pretenure) {
+ Handle<JSFunction> result = BaseNewFunctionFromBoilerplate(
+ boilerplate, Top::function_map(), pretenure);
result->set_context(*context);
int number_of_literals = boilerplate->NumberOfLiterals();
Handle<FixedArray> literals =
- Factory::NewFixedArray(number_of_literals, TENURED);
+ Factory::NewFixedArray(number_of_literals, pretenure);
if (number_of_literals > 0) {
// Store the object, regexp and array functions in the literals
// array prefix. These functions will be used when creating
@@ -715,6 +718,11 @@
}
+Handle<Object> Factory::ToObject(Handle<Object> object) {
+ CALL_HEAP_FUNCTION(object->ToObject(), Object);
+}
+
+
Handle<Object> Factory::ToObject(Handle<Object> object,
Handle<Context> global_context) {
CALL_HEAP_FUNCTION(object->ToObject(*global_context), Object);
@@ -763,6 +771,8 @@
Handle<JSFunction> Factory::CreateApiFunction(
Handle<FunctionTemplateInfo> obj, ApiInstanceType instance_type) {
Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::HandleApiCall));
+ Handle<Code> construct_stub =
+ Handle<Code>(Builtins::builtin(Builtins::JSConstructStubApi));
int internal_field_count = 0;
if (!obj->instance_template()->IsUndefined()) {
@@ -837,6 +847,7 @@
}
result->shared()->set_function_data(*obj);
+ result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
// Recursively copy parent templates' accessors, 'data' may be modified.
diff --git a/src/factory.h b/src/factory.h
index 951c043..2a347cd 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -219,7 +219,8 @@
static Handle<JSFunction> NewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Context> context);
+ Handle<Context> context,
+ PretenureFlag pretenure = TENURED);
static Handle<Code> NewCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
@@ -228,6 +229,7 @@
static Handle<Code> CopyCode(Handle<Code> code);
+ static Handle<Object> ToObject(Handle<Object> object);
static Handle<Object> ToObject(Handle<Object> object,
Handle<Context> global_context);
@@ -374,7 +376,8 @@
static Handle<JSFunction> BaseNewFunctionFromBoilerplate(
Handle<JSFunction> boilerplate,
- Handle<Map> function_map);
+ Handle<Map> function_map,
+ PretenureFlag pretenure);
// Create a new map cache.
static Handle<MapCache> NewMapCache(int at_least_space_for);
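Because the new parameter defaults to TENURED, existing callers of NewFunctionFromBoilerplate keep the old behavior while the compiler may opt individual closures into new space. A hypothetical call site:

    // Unchanged callers: function and literals array stay tenured.
    Handle<JSFunction> f1 =
        Factory::NewFunctionFromBoilerplate(boilerplate, context);

    // Opt in: allocate in new space so short-lived closures can die young.
    Handle<JSFunction> f2 =
        Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);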
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 1bdc367..e90a44e 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -36,7 +36,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
@@ -67,7 +67,8 @@
case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset;
break;
- default:
+ case Slot::CONTEXT:
+ case Slot::LOOKUP:
UNREACHABLE();
}
return offset;
@@ -149,6 +150,13 @@
}
+void FastCodeGenerator::SetStatementPosition(int pos) {
+ if (FLAG_debug_info) {
+ CodeGenerator::RecordPositions(masm_, pos);
+ }
+}
+
+
void FastCodeGenerator::SetSourcePosition(int pos) {
if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
masm_->RecordPosition(pos);
@@ -157,72 +165,66 @@
void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
-#ifdef DEBUG
- Expression::Context expected = Expression::kUninitialized;
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through.
- case Expression::kTest:
- // The value of the left subexpression is not needed.
- expected = Expression::kTest;
- break;
- case Expression::kValue:
- // The value of the left subexpression is needed and its specific
- // context depends on the operator.
- expected = (expr->op() == Token::OR)
- ? Expression::kValueTest
- : Expression::kTestValue;
- break;
- case Expression::kValueTest:
- // The value of the left subexpression is needed for OR.
- expected = (expr->op() == Token::OR)
- ? Expression::kValueTest
- : Expression::kTest;
- break;
- case Expression::kTestValue:
- // The value of the left subexpression is needed for AND.
- expected = (expr->op() == Token::OR)
- ? Expression::kTest
- : Expression::kTestValue;
- break;
- }
- ASSERT_EQ(expected, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
-#endif
-
Label eval_right, done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- // Set up the appropriate context for the left subexpression based on the
- // operation and our own context.
+ // Set up the appropriate context for the left subexpression based
+ // on the operation and our own context. Initially assume we can
+ // inherit both true and false labels from our context.
if (expr->op() == Token::OR) {
- // If there is no usable true label in the OR expression's context, use
- // the end of this expression, otherwise inherit the same true label.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
- true_label_ = &done;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ VisitForControl(expr->left(), &done, &eval_right);
+ break;
+ case Expression::kValue:
+ VisitForValueControl(expr->left(),
+ location_,
+ &done,
+ &eval_right);
+ break;
+ case Expression::kTest:
+ VisitForControl(expr->left(), true_label_, &eval_right);
+ break;
+ case Expression::kValueTest:
+ VisitForValueControl(expr->left(),
+ location_,
+ true_label_,
+ &eval_right);
+ break;
+ case Expression::kTestValue:
+ VisitForControl(expr->left(), true_label_, &eval_right);
+ break;
}
- // The false label is the label of the second subexpression.
- false_label_ = &eval_right;
} else {
ASSERT_EQ(Token::AND, expr->op());
- // The true label is the label of the second subexpression.
- true_label_ = &eval_right;
- // If there is no usable false label in the AND expression's context,
- // use the end of the expression, otherwise inherit the same false
- // label.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
- false_label_ = &done;
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ VisitForControl(expr->left(), &eval_right, &done);
+ break;
+ case Expression::kValue:
+ VisitForControlValue(expr->left(),
+ location_,
+ &eval_right,
+ &done);
+ break;
+ case Expression::kTest:
+ VisitForControl(expr->left(), &eval_right, false_label_);
+ break;
+ case Expression::kValueTest:
+ VisitForControl(expr->left(), &eval_right, false_label_);
+ break;
+ case Expression::kTestValue:
+ VisitForControlValue(expr->left(),
+ location_,
+ &eval_right,
+ false_label_);
+ break;
}
}
- Visit(expr->left());
- true_label_ = saved_true;
- false_label_ = saved_false;
-
__ bind(&eval_right);
Visit(expr->right());
@@ -232,15 +234,17 @@
void FastCodeGenerator::VisitBlock(Block* stmt) {
Comment cmnt(masm_, "[ Block");
+ Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
VisitStatements(stmt->statements());
+ __ bind(nested_statement.break_target());
}
void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
- Visit(stmt->expression());
+ VisitForEffect(stmt->expression());
}
@@ -252,19 +256,11 @@
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
Comment cmnt(masm_, "[ IfStatement");
- // Expressions cannot recursively enter statements, there are no labels in
- // the state.
- ASSERT_EQ(NULL, true_label_);
- ASSERT_EQ(NULL, false_label_);
+ SetStatementPosition(stmt);
Label then_part, else_part, done;
// Do not worry about optimizing for empty then or else bodies.
- true_label_ = &then_part;
- false_label_ = &else_part;
- ASSERT(stmt->condition()->context() == Expression::kTest);
- Visit(stmt->condition());
- true_label_ = NULL;
- false_label_ = NULL;
+ VisitForControl(stmt->condition(), &then_part, &else_part);
__ bind(&then_part);
Visit(stmt->then_statement());
@@ -278,22 +274,82 @@
void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ ContinueStatement");
+ SetStatementPosition(stmt);
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (!current->IsContinueTarget(stmt->target())) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ Iteration* loop = current->AsIteration();
+ __ jmp(loop->continue_target());
}
void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ BreakStatement");
+ SetStatementPosition(stmt);
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (!current->IsBreakTarget(stmt->target())) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ Breakable* target = current->AsBreakable();
+ __ jmp(target->break_target());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Comment cmnt(masm_, "[ ReturnStatement");
+ SetStatementPosition(stmt);
+ Expression* expr = stmt->expression();
+ VisitForValue(expr, kAccumulator);
+
+ // Exit all nested statements.
+ NestedStatement* current = nesting_stack_;
+ int stack_depth = 0;
+ while (current != NULL) {
+ stack_depth = current->Exit(stack_depth);
+ current = current->outer();
+ }
+ __ Drop(stack_depth);
+
+ EmitReturnSequence(stmt->statement_pos());
}
void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ SetStatementPosition(stmt);
+
+ VisitForValue(stmt->expression(), kStack);
+ if (stmt->is_catch_block()) {
+ __ CallRuntime(Runtime::kPushCatchContext, 1);
+ } else {
+ __ CallRuntime(Runtime::kPushContext, 1);
+ }
+ // Both runtime calls return the new context in both the context and the
+ // result registers.
+
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ WithExitStatement");
+ SetStatementPosition(stmt);
+
+ // Pop context.
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
}
@@ -304,8 +360,11 @@
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
+ SetStatementPosition(stmt);
+ Label body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
increment_loop_depth();
- Label body, exit, stack_limit_hit, stack_check_success;
__ bind(&body);
Visit(stmt->body());
@@ -314,23 +373,16 @@
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
- // We are not in an expression context because we have been compiling
- // statements. Set up a test expression context for the condition.
- ASSERT_EQ(NULL, true_label_);
- ASSERT_EQ(NULL, false_label_);
- true_label_ = &body;
- false_label_ = &exit;
- ASSERT(stmt->cond()->context() == Expression::kTest);
- Visit(stmt->cond());
- true_label_ = NULL;
- false_label_ = NULL;
+ __ bind(loop_statement.continue_target());
+ SetStatementPosition(stmt->condition_position());
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
- __ bind(&exit);
+ __ bind(loop_statement.break_target());
decrement_loop_depth();
}
@@ -338,85 +390,37 @@
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
+ SetStatementPosition(stmt);
+ Label body, stack_limit_hit, stack_check_success;
+
+ Iteration loop_statement(this, stmt);
increment_loop_depth();
- Label test, body, exit, stack_limit_hit, stack_check_success;
// Emit the test at the bottom of the loop.
- __ jmp(&test);
+ __ jmp(loop_statement.continue_target());
__ bind(&body);
Visit(stmt->body());
- __ bind(&test);
+ __ bind(loop_statement.continue_target());
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
- // We are not in an expression context because we have been compiling
- // statements. Set up a test expression context for the condition.
- ASSERT_EQ(NULL, true_label_);
- ASSERT_EQ(NULL, false_label_);
- true_label_ = &body;
- false_label_ = &exit;
- ASSERT(stmt->cond()->context() == Expression::kTest);
- Visit(stmt->cond());
- true_label_ = NULL;
- false_label_ = NULL;
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
__ CallStub(&stack_stub);
__ jmp(&stack_check_success);
- __ bind(&exit);
-
+ __ bind(loop_statement.break_target());
decrement_loop_depth();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
- Comment cmnt(masm_, "[ ForStatement");
- Label test, body, exit, stack_limit_hit, stack_check_success;
- if (stmt->init() != NULL) Visit(stmt->init());
-
- increment_loop_depth();
- // Emit the test at the bottom of the loop (even if empty).
- __ jmp(&test);
- __ bind(&body);
- Visit(stmt->body());
-
- // Check stack before looping.
- __ StackLimitCheck(&stack_limit_hit);
- __ bind(&stack_check_success);
-
- if (stmt->next() != NULL) Visit(stmt->next());
-
- __ bind(&test);
-
- if (stmt->cond() == NULL) {
- // For an empty test jump to the top of the loop.
- __ jmp(&body);
- } else {
- // We are not in an expression context because we have been compiling
- // statements. Set up a test expression context for the condition.
- ASSERT_EQ(NULL, true_label_);
- ASSERT_EQ(NULL, false_label_);
-
- true_label_ = &body;
- false_label_ = &exit;
- ASSERT(stmt->cond()->context() == Expression::kTest);
- Visit(stmt->cond());
- true_label_ = NULL;
- false_label_ = NULL;
- }
-
- __ bind(&stack_limit_hit);
- StackCheckStub stack_stub;
- __ CallStub(&stack_stub);
- __ jmp(&stack_check_success);
-
- __ bind(&exit);
- decrement_loop_depth();
+ UNREACHABLE();
}
@@ -426,12 +430,109 @@
void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ TryCatchStatement");
+ SetStatementPosition(stmt);
+ // The try block adds a handler to the exception handler chain
+ // before entering, and removes it again when exiting normally.
+ // If an exception is thrown during execution of the try block,
+ // control is passed to the handler, consuming the handler entry in the
+ // process. At that point the exception is in a register, and we store
+ // it in the temporary local variable (printed as ".catch-var") before
+ // executing the catch block.
+ // to introduce a new scope to bind the catch variable and to remove
+ // that scope again afterwards.
+
+ Label try_handler_setup, catch_entry, done;
+ __ Call(&try_handler_setup);
+ // Try handler code, exception in result register.
+
+ // Store exception in local .catch variable before executing catch block.
+ {
+ // The catch variable is *always* a variable proxy for a local variable.
+ Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(catch_var);
+ Slot* variable_slot = catch_var->slot();
+ ASSERT_NOT_NULL(variable_slot);
+ ASSERT_EQ(Slot::LOCAL, variable_slot->type());
+ StoreToFrameField(SlotOffset(variable_slot), result_register());
+ }
+
+ Visit(stmt->catch_block());
+ __ jmp(&done);
+
+ // Try block code. Sets up the exception handler chain.
+ __ bind(&try_handler_setup);
+ {
+ TryCatch try_block(this, &catch_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+ Visit(stmt->try_block());
+ __ PopTryHandler();
+ }
+ __ bind(&done);
}
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ TryFinallyStatement");
+ SetStatementPosition(stmt);
+ // Try finally is compiled by setting up a try-handler on the stack while
+ // executing the try body, and removing it again afterwards.
+ //
+ // The try-finally construct can enter the finally block in three ways:
+ // 1. By exiting the try-block normally. This removes the try-handler and
+ // calls the finally block code before continuing.
+ // 2. By exiting the try-block with a function-local control flow transfer
+ // (break/continue/return). The site of the transfer (e.g., the break)
+ // removes the try handler and calls the finally block code before
+ // continuing its outward control transfer.
+ // 3. By exiting the try-block with a thrown exception.
+ // This can happen in nested function calls. It traverses the try-handler
+ // chain and consumes the try-handler entry before jumping to the
+ // handler code. The handler code then calls the finally-block before
+ // rethrowing the exception.
+ //
+ // The finally block must assume a return address on top of the stack
+ // (or in the link register on ARM chips) and a value (return value or
+ // exception) in the result register (rax/eax/r0), both of which must
+ // be preserved. The return address isn't GC-safe, so it should be
+ // cooked before GC.
+ Label finally_entry;
+ Label try_handler_setup;
+
+ // Jump to the try-handler setup and the try-block code. Use a call to
+ // put the try-handler address on the stack.
+ __ Call(&try_handler_setup);
+ // Try handler code. Return address of call is pushed on handler stack.
+ {
+ // This code is only executed during stack-handler traversal when an
+ // exception is thrown. The exception is in the result register, which
+ // is retained by the finally block.
+ // Call the finally block and then rethrow the exception.
+ __ Call(&finally_entry);
+ __ push(result_register());
+ __ CallRuntime(Runtime::kReThrow, 1);
+ }
+
+ __ bind(&finally_entry);
+ {
+ // Finally block implementation.
+ Finally finally_block(this);
+ EnterFinallyBlock();
+ Visit(stmt->finally_block());
+ ExitFinallyBlock(); // Return to the calling code.
+ }
+
+ __ bind(&try_handler_setup);
+ {
+ // Set up the try handler (records the stack pointer).
+ TryFinally try_block(this, &finally_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+ Visit(stmt->try_block());
+ __ PopTryHandler();
+ }
+ // Execute the finally block on the way out.
+ __ Call(&finally_entry);
}
@@ -453,34 +554,20 @@
void FastCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
- ASSERT_EQ(Expression::kTest, expr->condition()->context());
- ASSERT_EQ(expr->context(), expr->then_expression()->context());
- ASSERT_EQ(expr->context(), expr->else_expression()->context());
-
-
Label true_case, false_case, done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
-
- true_label_ = &true_case;
- false_label_ = &false_case;
- Visit(expr->condition());
- true_label_ = saved_true;
- false_label_ = saved_false;
+ VisitForControl(expr->condition(), &true_case, &false_case);
__ bind(&true_case);
Visit(expr->then_expression());
// If control flow falls through Visit, jump to done.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
+ if (context_ == Expression::kEffect || context_ == Expression::kValue) {
__ jmp(&done);
}
__ bind(&false_case);
Visit(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
- if (expr->context() == Expression::kEffect ||
- expr->context() == Expression::kValue) {
+ if (context_ == Expression::kEffect || context_ == Expression::kValue) {
__ bind(&done);
}
}
@@ -494,50 +581,83 @@
void FastCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
- Move(expr->context(), expr);
+ Apply(context_, expr);
}
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
- ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
-
- // Record source code position of the (possible) IC call.
- SetSourcePosition(expr->position());
-
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->target()->AsProperty();
- // In case of a property we use the uninitialized expression context
- // of the key to detect a named property.
if (prop != NULL) {
- assign_type = (prop->key()->context() == Expression::kUninitialized)
- ? NAMED_PROPERTY
- : KEYED_PROPERTY;
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
}
- Expression* rhs = expr->value();
- ASSERT_EQ(Expression::kValue, rhs->context());
-
+ // Evaluate LHS expression.
switch (assign_type) {
case VARIABLE:
- Visit(rhs);
- EmitVariableAssignment(expr);
+ // Nothing to do here.
break;
case NAMED_PROPERTY:
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(rhs);
+ VisitForValue(prop->obj(), kStack);
+ break;
+ case KEYED_PROPERTY:
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
+ break;
+ }
+
+ // If we have a compound assignment: get the value of the LHS expression
+ // and store it on top of the stack.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kStack;
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+ Expression::kValue);
+ break;
+ case NAMED_PROPERTY:
+ EmitNamedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ case KEYED_PROPERTY:
+ EmitKeyedPropertyLoad(prop);
+ __ push(result_register());
+ break;
+ }
+ location_ = saved_location;
+ }
+
+ // Evaluate RHS expression.
+ Expression* rhs = expr->value();
+ VisitForValue(rhs, kAccumulator);
+
+ // If we have a compound assignment: apply the operator.
+ if (expr->is_compound()) {
+ Location saved_location = location_;
+ location_ = kAccumulator;
+ EmitBinaryOp(expr->binary_op(), Expression::kValue);
+ location_ = saved_location;
+ }
+
+ // Record source position before possible IC call.
+ SetSourcePosition(expr->position());
+
+ // Store the value.
+ switch (assign_type) {
+ case VARIABLE:
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ context_);
+ break;
+ case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
case KEYED_PROPERTY:
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->key());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(rhs);
EmitKeyedPropertyAssignment(expr);
break;
}
@@ -545,12 +665,39 @@
void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- UNREACHABLE();
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ VisitForValue(expr->key(), kStack);
+ VisitForValue(expr->value(), kStack);
+ // Create catch extension object.
+ __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ Apply(context_, result_register());
}
void FastCodeGenerator::VisitThrow(Throw* expr) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ Throw");
+ VisitForValue(expr->exception(), kStack);
+ __ CallRuntime(Runtime::kThrow, 1);
+ // Never returns here.
+}
+
+
+int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
+ // The macros used here must preserve the result register.
+ __ Drop(stack_depth);
+ __ PopTryHandler();
+ __ Call(finally_entry_);
+ return 0;
+}
+
+
+int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
+ // The macros used here must preserve the result register.
+ __ Drop(stack_depth);
+ __ PopTryHandler();
+ return 0;
}
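Taken together, VisitTryFinallyStatement emits three entry paths into a single finally body. A sketch of the generated layout (labels as in the function above; illustrative, not actual assembler output):

    //   Call try_handler_setup     ; enter the try block; the call pushes the
    //                              ; address used as the exception handler
    //   ; exception path falls through here, exception in the result register:
    //   Call finally_entry         ; run the finally block first
    //   push result_register
    //   CallRuntime kReThrow       ; then rethrow
    // finally_entry:
    //   EnterFinallyBlock          ; cook the return address, save state
    //   <finally block body>
    //   ExitFinallyBlock           ; return to whichever caller invoked it
    // try_handler_setup:
    //   PushTryHandler TRY_FINALLY
    //   <try block body>
    //   PopTryHandler
    //   Call finally_entry         ; normal exit also runs the finally block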
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index 9b262a7..c26e0f3 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -35,6 +35,8 @@
namespace v8 {
namespace internal {
+// -----------------------------------------------------------------------------
+// Fast code generator.
class FastCodeGenerator: public AstVisitor {
public:
@@ -43,7 +45,9 @@
function_(NULL),
script_(script),
is_eval_(is_eval),
+ nesting_stack_(NULL),
loop_depth_(0),
+ location_(kStack),
true_label_(NULL),
false_label_(NULL) {
}
@@ -55,26 +59,264 @@
void Generate(FunctionLiteral* fun);
private:
+ class Breakable;
+ class Iteration;
+ class TryCatch;
+ class TryFinally;
+ class Finally;
+ class ForIn;
+
+ class NestedStatement BASE_EMBEDDED {
+ public:
+ explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
+ // Link into codegen's nesting stack.
+ previous_ = codegen->nesting_stack_;
+ codegen->nesting_stack_ = this;
+ }
+ virtual ~NestedStatement() {
+ // Unlink from codegen's nesting stack.
+ ASSERT_EQ(this, codegen_->nesting_stack_);
+ codegen_->nesting_stack_ = previous_;
+ }
+
+ virtual Breakable* AsBreakable() { return NULL; }
+ virtual Iteration* AsIteration() { return NULL; }
+ virtual TryCatch* AsTryCatch() { return NULL; }
+ virtual TryFinally* AsTryFinally() { return NULL; }
+ virtual Finally* AsFinally() { return NULL; }
+ virtual ForIn* AsForIn() { return NULL; }
+
+ virtual bool IsContinueTarget(Statement* target) { return false; }
+ virtual bool IsBreakTarget(Statement* target) { return false; }
+
+ // Generate code to leave the nested statement. This includes
+ // cleaning up any stack elements in use and restoring the
+ // stack to the expectations of the surrounding statements.
+ // Takes a number of stack elements currently on top of the
+ // nested statement's stack, and returns a number of stack
+ // elements left on top of the surrounding statement's stack.
+ // The generated code must preserve the result register (which
+ // contains the value in case of a return).
+ virtual int Exit(int stack_depth) {
+ // Default implementation for the case where there is
+ // nothing to clean up.
+ return stack_depth;
+ }
+ NestedStatement* outer() { return previous_; }
+ protected:
+ MacroAssembler* masm() { return codegen_->masm(); }
+ private:
+ FastCodeGenerator* codegen_;
+ NestedStatement* previous_;
+ DISALLOW_COPY_AND_ASSIGN(NestedStatement);
+ };
+
+ class Breakable : public NestedStatement {
+ public:
+ Breakable(FastCodeGenerator* codegen,
+ BreakableStatement* break_target)
+ : NestedStatement(codegen),
+ target_(break_target) {}
+ virtual ~Breakable() {}
+ virtual Breakable* AsBreakable() { return this; }
+ virtual bool IsBreakTarget(Statement* statement) {
+ return target_ == statement;
+ }
+ BreakableStatement* statement() { return target_; }
+ Label* break_target() { return &break_target_label_; }
+ private:
+ BreakableStatement* target_;
+ Label break_target_label_;
+ DISALLOW_COPY_AND_ASSIGN(Breakable);
+ };
+
+ class Iteration : public Breakable {
+ public:
+ Iteration(FastCodeGenerator* codegen,
+ IterationStatement* iteration_statement)
+ : Breakable(codegen, iteration_statement) {}
+ virtual ~Iteration() {}
+ virtual Iteration* AsIteration() { return this; }
+ virtual bool IsContinueTarget(Statement* statement) {
+ return this->statement() == statement;
+ }
+ Label* continue_target() { return &continue_target_label_; }
+ private:
+ Label continue_target_label_;
+ DISALLOW_COPY_AND_ASSIGN(Iteration);
+ };
+
+ // The environment inside the try block of a try/catch statement.
+ class TryCatch : public NestedStatement {
+ public:
+ explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
+ : NestedStatement(codegen), catch_entry_(catch_entry) { }
+ virtual ~TryCatch() {}
+ virtual TryCatch* AsTryCatch() { return this; }
+ Label* catch_entry() { return catch_entry_; }
+ virtual int Exit(int stack_depth);
+ private:
+ Label* catch_entry_;
+ DISALLOW_COPY_AND_ASSIGN(TryCatch);
+ };
+
+ // The environment inside the try block of a try/finally statement.
+ class TryFinally : public NestedStatement {
+ public:
+ explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
+ : NestedStatement(codegen), finally_entry_(finally_entry) { }
+ virtual ~TryFinally() {}
+ virtual TryFinally* AsTryFinally() { return this; }
+ Label* finally_entry() { return finally_entry_; }
+ virtual int Exit(int stack_depth);
+ private:
+ Label* finally_entry_;
+ DISALLOW_COPY_AND_ASSIGN(TryFinally);
+ };
+
+ // A Finally scope represents being inside a finally block.
+ // Abnormal termination of the finally block needs to clean up
+ // the block's parameters from the stack.
+ class Finally : public NestedStatement {
+ public:
+ explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
+ virtual ~Finally() {}
+ virtual Finally* AsFinally() { return this; }
+ virtual int Exit(int stack_depth) {
+ return stack_depth + kFinallyStackElementCount;
+ }
+ private:
+ // Number of extra stack slots occupied during a finally block.
+ static const int kFinallyStackElementCount = 2;
+ DISALLOW_COPY_AND_ASSIGN(Finally);
+ };
+
+ // A ForIn scope represents being inside a for-in loop.
+ // Abnormal termination of the for-in block needs to clean up
+ // the block's temporary storage from the stack.
+ class ForIn : public Iteration {
+ public:
+ ForIn(FastCodeGenerator* codegen,
+ ForInStatement* statement)
+ : Iteration(codegen, statement) { }
+ virtual ~ForIn() {}
+ virtual ForIn* AsForIn() { return this; }
+ virtual int Exit(int stack_depth) {
+ return stack_depth + kForInStackElementCount;
+ }
+ private:
+ // TODO(lrn): Check that this value is correct when implementing
+ // for-in.
+ static const int kForInStackElementCount = 5;
+ DISALLOW_COPY_AND_ASSIGN(ForIn);
+ };
+
+ enum Location {
+ kAccumulator,
+ kStack
+ };
+
int SlotOffset(Slot* slot);
- void Move(Expression::Context destination, Register source);
- void Move(Expression::Context destination, Slot* source, Register scratch);
- void Move(Expression::Context destination, Literal* source);
+
+ // Emit code to convert a pure value (in a register, slot, as a literal,
+ // or on top of the stack) into the result expected according to an
+ // expression context.
+ void Apply(Expression::Context context, Register reg);
+ void Apply(Expression::Context context, Slot* slot);
+ void Apply(Expression::Context context, Literal* lit);
+ void ApplyTOS(Expression::Context context);
+
+ // Emit code to discard count elements from the top of stack, then convert
+ // a pure value into the result expected according to an expression
+ // context.
+ void DropAndApply(int count, Expression::Context context, Register reg);
+
+ // Emit code to convert pure control flow (a branch to one of a pair of
+ // labels) into the result expected according to an expression context.
+ void Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false);
+
+ // Helper function to convert a pure value into a test context. The value
+ // is expected on the stack or the accumulator, depending on the platform.
+ // See the platform-specific implementation for details.
+ void DoTest(Expression::Context context);
+
void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
void Move(Register dst, Slot* source);
- // Templated to allow for Operand on intel and MemOperand on ARM.
- template <typename MemoryLocation>
- MemoryLocation CreateSlotOperand(Slot* slot, Register scratch);
+ // Return an operand used to read/write to a known (i.e., non-LOOKUP) slot.
+ // May emit code to traverse the context chain, destroying the scratch
+ // register.
+ MemOperand EmitSlotSearch(Slot* slot, Register scratch);
- // Drop the TOS, and store source to destination.
- // If destination is TOS, just overwrite TOS with source.
- void DropAndMove(Expression::Context destination,
- Register source,
- int drop_count = 1);
+ void VisitForEffect(Expression* expr) {
+ Expression::Context saved_context = context_;
+ context_ = Expression::kEffect;
+ Visit(expr);
+ context_ = saved_context;
+ }
- // Test the JavaScript value in source as if in a test context, compile
- // control flow to a pair of labels.
- void TestAndBranch(Register source, Label* true_label, Label* false_label);
+ void VisitForValue(Expression* expr, Location where) {
+ Expression::Context saved_context = context_;
+ Location saved_location = location_;
+ context_ = Expression::kValue;
+ location_ = where;
+ Visit(expr);
+ context_ = saved_context;
+ location_ = saved_location;
+ }
+
+ void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
+ Expression::Context saved_context = context_;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ context_ = Expression::kTest;
+ true_label_ = if_true;
+ false_label_ = if_false;
+ Visit(expr);
+ context_ = saved_context;
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ }
+
+ void VisitForValueControl(Expression* expr,
+ Location where,
+ Label* if_true,
+ Label* if_false) {
+ Expression::Context saved_context = context_;
+ Location saved_location = location_;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ context_ = Expression::kValueTest;
+ location_ = where;
+ true_label_ = if_true;
+ false_label_ = if_false;
+ Visit(expr);
+ context_ = saved_context;
+ location_ = saved_location;
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ }
+
+ void VisitForControlValue(Expression* expr,
+ Location where,
+ Label* if_true,
+ Label* if_false) {
+ Expression::Context saved_context = context_;
+ Location saved_location = location_;
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ context_ = Expression::kTestValue;
+ location_ = where;
+ true_label_ = if_true;
+ false_label_ = if_false;
+ Visit(expr);
+ context_ = saved_context;
+ location_ = saved_location;
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ }
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
@@ -84,27 +326,49 @@
// Platform-specific code sequences for calls
void EmitCallWithStub(Call* expr);
- void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
+ void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
+
+ // Platform-specific code for loading variables.
+ void EmitVariableLoad(Variable* expr, Expression::Context context);
// Platform-specific support for compiling assignments.
- // Complete a variable assignment. The right-hand-side value is expected
- // on top of the stack.
- void EmitVariableAssignment(Assignment* expr);
+ // Load a value from a named property.
+ // The receiver is left on the stack by the IC.
+ void EmitNamedPropertyLoad(Property* expr);
- // Complete a named property assignment. The receiver and right-hand-side
- // value are expected on top of the stack.
+ // Load a value from a keyed property.
+  // The receiver and the key are left on the stack by the IC.
+ void EmitKeyedPropertyLoad(Property* expr);
+
+ // Apply the compound assignment operator. Expects the left operand on top
+ // of the stack and the right one in the accumulator.
+ void EmitBinaryOp(Token::Value op, Expression::Context context);
+
+ // Complete a variable assignment. The right-hand-side value is expected
+ // in the accumulator.
+ void EmitVariableAssignment(Variable* var, Expression::Context context);
+
+ // Complete a named property assignment. The receiver is expected on top
+ // of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
- // Complete a keyed property assignment. The reciever, key, and
- // right-hand-side value are expected on top of the stack.
+ // Complete a keyed property assignment. The receiver and key are
+ // expected on top of the stack and the right-hand-side value in the
+ // accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
+ void SetStatementPosition(int pos);
void SetSourcePosition(int pos);
+ // Non-local control flow support.
+ void EnterFinallyBlock();
+ void ExitFinallyBlock();
+
+ // Loop nesting counter.
int loop_depth() { return loop_depth_; }
void increment_loop_depth() { loop_depth_++; }
void decrement_loop_depth() {
@@ -112,11 +376,22 @@
loop_depth_--;
}
+ MacroAssembler* masm() { return masm_; }
+ static Register result_register();
+ static Register context_register();
+
+ // Set fields in the stack frame. Offsets are the frame pointer relative
+ // offsets defined in, e.g., StandardFrameConstants.
+ void StoreToFrameField(int frame_offset, Register value);
+
+ // Load a value from the current context. Indices are defined as an enum
+ // in v8::internal::Context.
+ void LoadContextField(Register dst, int context_index);
+
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
-
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
@@ -125,11 +400,16 @@
Handle<Script> script_;
bool is_eval_;
Label return_label_;
+ NestedStatement* nesting_stack_;
int loop_depth_;
+ Expression::Context context_;
+ Location location_;
Label* true_label_;
Label* false_label_;
+ friend class NestedStatement;
+
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};
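
The VisitFor* helpers added to this class all follow the same discipline: save the codegen state, set the state the subexpression must honor, visit, restore. A minimal standalone sketch of that idiom (names here are illustrative, not the V8 API):

    #include <cstdio>

    enum Context { kEffect, kValue, kTest };

    struct Visitor {
      Context context_ = kEffect;

      void Visit(const char* expr) {
        // A real visitor would emit code for 'expr' honoring context_.
        std::printf("visit %s in context %d\n", expr, context_);
      }

      // Visit a subexpression for its value, restoring the caller's
      // context afterwards -- the same save/set/visit/restore shape as
      // VisitForValue/VisitForControl above.
      void VisitForValue(const char* expr) {
        Context saved = context_;
        context_ = kValue;
        Visit(expr);
        context_ = saved;
      }
    };

    int main() {
      Visitor v;
      v.VisitForValue("a + b");
    }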
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 88fda12..5c0aa0c 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -198,6 +198,9 @@
DEFINE_bool(canonicalize_object_literal_maps, true,
"Canonicalize maps for object literals.")
+DEFINE_bool(use_big_map_space, true,
+ "Use big map space, but don't compact if it grew too big.")
+
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
@@ -228,6 +231,7 @@
// Regexp
DEFINE_bool(trace_regexps, false, "trace regexp execution")
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
+DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@@ -325,6 +329,9 @@
"(requires heap_stats)")
// Regexp
+DEFINE_bool(regexp_possessive_quantifier,
+ false,
+ "enable possessive quantifier syntax for testing")
DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
DEFINE_bool(trace_regexp_assembler,
false,
@@ -351,6 +358,8 @@
DEFINE_bool(log_gc, false,
"Log heap samples on garbage collection for the hp2ps tool.")
DEFINE_bool(log_handles, false, "Log global handle events.")
+DEFINE_bool(log_snapshot_positions, false,
+ "log positions of (de)serialized objects in the snapshot.")
DEFINE_bool(log_state_changes, false, "Log state changes.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
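
For context, each DEFINE_bool(name, default, comment) line becomes a global FLAG_name boolean that call sites test directly; a simplified stand-in for the machinery (the real header is included several times under different macro definitions to declare, define, and register each flag):

    #include <cstdio>

    // Simplified: one macro expansion. v8 additionally generates
    // registration records so --name / --noname can flip the value
    // from the command line at startup.
    #define DEFINE_BOOL(nam, def, cmt) bool FLAG_##nam = def;

    DEFINE_BOOL(use_big_map_space, true, "Use big map space...")

    int main() {
      if (FLAG_use_big_map_space) std::printf("big map space enabled\n");
    }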
diff --git a/src/frames.cc b/src/frames.cc
index 7c327dd..2f90a31 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -306,7 +306,7 @@
void StackHandler::Uncook(Code* code) {
- ASSERT(MarkCompactCollector::IsCompacting());
+ ASSERT(MarkCompactCollector::HasCompacted());
set_pc(code->instruction_start() + OffsetFrom(pc()));
ASSERT(code->contains(pc()));
}
@@ -336,7 +336,7 @@
void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
// Only uncooking frames when the collector is compacting and thus moving code
// around.
- ASSERT(MarkCompactCollector::IsCompacting());
+ ASSERT(MarkCompactCollector::HasCompacted());
ASSERT(thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Uncook();
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 1a0c982..e4bb925 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -168,6 +168,12 @@
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
+ // Check that we are not passing a finalized external string to
+ // the callback.
+ ASSERT(!object_->IsExternalAsciiString() ||
+ ExternalAsciiString::cast(object_)->resource() != NULL);
+ ASSERT(!object_->IsExternalTwoByteString() ||
+ ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
VMState state(EXTERNAL);
func(object, par);
@@ -507,5 +513,4 @@
object_groups->Clear();
}
-
} } // namespace v8::internal
diff --git a/src/globals.h b/src/globals.h
index ad0539f..f5cb1c0 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -145,6 +145,14 @@
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+// Desired alignment for maps.
+#if V8_HOST_ARCH_64_BIT
+const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
+#else
+const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
+#endif
+const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
+const intptr_t kMapAlignmentMask = kMapAlignment - 1;
// Tag information for Failure.
const int kFailureTag = 3;
@@ -174,6 +182,11 @@
#endif
+// Number of bits to represent the page size for paged spaces. The value of 13
+// gives 8K bytes per page.
+const int kPageSizeBits = 13;
+
+
// Constants relevant to double precision floating point numbers.
// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
@@ -294,7 +307,7 @@
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
+enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
// A CodeDesc describes a buffer holding instructions and relocation
@@ -366,6 +379,12 @@
};
+enum CallFunctionFlags {
+ NO_CALL_FUNCTION_FLAGS = 0,
+ RECEIVER_MIGHT_BE_VALUE = 1 << 0 // Receiver might not be a JSObject.
+};
+
+
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
@@ -450,6 +469,10 @@
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
+// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
+#define MAP_SIZE_ALIGN(value) \
+ (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
+
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
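
As a quick check of the MAP_SIZE_ALIGN arithmetic above, a standalone sketch (the alignment value is illustrative; on 32-bit targets kMapAlignmentBits works out to kObjectAlignmentBits + 3):

    #include <cstdio>
    #include <cstdint>

    const int kMapAlignmentBits = 5;  // illustrative: 2 (object bits) + 3
    const intptr_t kMapAlignment = 1 << kMapAlignmentBits;  // 32 bytes
    const intptr_t kMapAlignmentMask = kMapAlignment - 1;   // 0x1f

    #define MAP_SIZE_ALIGN(value) \
      (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)

    int main() {
      // Rounds up to the next 32-byte boundary; aligned values unchanged.
      std::printf("%ld\n", (long)MAP_SIZE_ALIGN(41));  // 64
      std::printf("%ld\n", (long)MAP_SIZE_ALIGN(64));  // 64
    }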
diff --git a/src/heap-inl.h b/src/heap-inl.h
index eccd5ee..bd4f86b 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -54,7 +54,8 @@
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
- retry_space == OLD_DATA_SPACE);
+ retry_space == OLD_DATA_SPACE ||
+ retry_space == LO_SPACE);
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
!disallow_allocation_failure_ &&
@@ -109,6 +110,19 @@
}
+void Heap::FinalizeExternalString(String* string) {
+ ASSERT(string->IsExternalString());
+ v8::String::ExternalStringResourceBase** resource_addr =
+ reinterpret_cast<v8::String::ExternalStringResourceBase**>(
+ reinterpret_cast<byte*>(string) +
+ ExternalString::kResourceOffset -
+ kHeapObjectTag);
+ delete *resource_addr;
+ // Clear the resource pointer in the string.
+ *resource_addr = NULL;
+}
+
+
Object* Heap::AllocateRawMap() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
@@ -116,6 +130,12 @@
#endif
Object* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
+#ifdef DEBUG
+ if (!result->IsFailure()) {
+ // Maps have their own alignment.
+ CHECK((OffsetFrom(result) & kMapAlignmentMask) == kHeapObjectTag);
+ }
+#endif
return result;
}
@@ -177,12 +197,23 @@
// other object types are promoted to old pointer space. We do not use
// object->IsHeapNumber() and object->IsSeqString() because we already
// know that object has the heap object tag.
- ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
- bool has_pointers =
- type != HEAP_NUMBER_TYPE &&
- (type >= FIRST_NONSTRING_TYPE ||
- (type & kStringRepresentationMask) != kSeqStringTag);
- return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
+
+ // These objects are never allocated in new space.
+ ASSERT(type != MAP_TYPE);
+ ASSERT(type != CODE_TYPE);
+ ASSERT(type != ODDBALL_TYPE);
+ ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
+
+ if (type < FIRST_NONSTRING_TYPE) {
+ // There are three string representations: sequential strings, cons
+ // strings, and external strings. Only cons strings contain
+ // non-map-word pointers to heap objects.
+ return ((type & kStringRepresentationMask) == kConsStringTag)
+ ? OLD_POINTER_SPACE
+ : OLD_DATA_SPACE;
+ } else {
+ return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
+ }
}
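
The rewritten promotion logic reduces to a small decision table; a standalone mirror of it (tag extraction elided, predicates passed in directly):

    enum Space { OLD_POINTER_SPACE, OLD_DATA_SPACE };

    // Mirrors the branch above: among strings, only cons strings hold
    // non-map-word pointers to other heap objects; for everything else,
    // data-only types go to old data space and pointer-bearing types to
    // old pointer space.
    Space TargetSpace(bool is_string, bool is_cons_string, bool is_data_only) {
      if (is_string) {
        return is_cons_string ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
      }
      return is_data_only ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
    }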
@@ -321,6 +352,56 @@
#endif
+void ExternalStringTable::AddString(String* string) {
+ ASSERT(string->IsExternalString());
+ if (Heap::InNewSpace(string)) {
+ new_space_strings_.Add(string);
+ } else {
+ old_space_strings_.Add(string);
+ }
+}
+
+
+void ExternalStringTable::Iterate(ObjectVisitor* v) {
+ if (!new_space_strings_.is_empty()) {
+ Object** start = &new_space_strings_[0];
+ v->VisitPointers(start, start + new_space_strings_.length());
+ }
+ if (!old_space_strings_.is_empty()) {
+ Object** start = &old_space_strings_[0];
+ v->VisitPointers(start, start + old_space_strings_.length());
+ }
+}
+
+
+// Verify() is inline to avoid #ifdefs around its calls in release
+// mode.
+void ExternalStringTable::Verify() {
+#ifdef DEBUG
+ for (int i = 0; i < new_space_strings_.length(); ++i) {
+ ASSERT(Heap::InNewSpace(new_space_strings_[i]));
+ ASSERT(new_space_strings_[i] != Heap::raw_unchecked_null_value());
+ }
+ for (int i = 0; i < old_space_strings_.length(); ++i) {
+ ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+ ASSERT(old_space_strings_[i] != Heap::raw_unchecked_null_value());
+ }
+#endif
+}
+
+
+void ExternalStringTable::AddOldString(String* string) {
+ ASSERT(string->IsExternalString());
+ ASSERT(!Heap::InNewSpace(string));
+ old_space_strings_.Add(string);
+}
+
+
+void ExternalStringTable::ShrinkNewStrings(int position) {
+ new_space_strings_.Rewind(position);
+ Verify();
+}
+
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index bd1cd2d..b615055 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -667,8 +667,9 @@
can_log_ = true;
}
-void ProducerHeapProfile::RecordJSObjectAllocation(Object* obj) {
- if (!can_log_ || !FLAG_log_producers) return;
+void ProducerHeapProfile::DoRecordJSObjectAllocation(Object* obj) {
+ ASSERT(FLAG_log_producers);
+ if (!can_log_) return;
int framesCount = 0;
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
++framesCount;
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index f8cb04d..c615942 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -261,8 +261,12 @@
class ProducerHeapProfile : public AllStatic {
public:
static void Setup();
- static void RecordJSObjectAllocation(Object* obj);
+ static void RecordJSObjectAllocation(Object* obj) {
+ if (FLAG_log_producers) DoRecordJSObjectAllocation(obj);
+ }
+
private:
+ static void DoRecordJSObjectAllocation(Object* obj);
static bool can_log_;
};
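
The heap-profiler change is the classic inline-fast-path split: the cheap flag test is inlined at every call site, and only the rare enabled case pays for a call. The shape, in isolation:

    extern bool FLAG_log_producers;       // cheap global test
    void DoRecordAllocation(void* obj);   // out-of-line slow path

    // Inlined at each allocation site: when logging is off this is one
    // predictable branch and no function call.
    inline void RecordAllocation(void* obj) {
      if (FLAG_log_producers) DoRecordAllocation(obj);
    }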
diff --git a/src/heap.cc b/src/heap.cc
index 4e4cd1c..fba2e87 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -479,6 +479,65 @@
}
+void Heap::ReserveSpace(
+ int new_space_size,
+ int pointer_space_size,
+ int data_space_size,
+ int code_space_size,
+ int map_space_size,
+ int cell_space_size,
+ int large_object_size) {
+ NewSpace* new_space = Heap::new_space();
+ PagedSpace* old_pointer_space = Heap::old_pointer_space();
+ PagedSpace* old_data_space = Heap::old_data_space();
+ PagedSpace* code_space = Heap::code_space();
+ PagedSpace* map_space = Heap::map_space();
+ PagedSpace* cell_space = Heap::cell_space();
+ LargeObjectSpace* lo_space = Heap::lo_space();
+ bool gc_performed = true;
+ while (gc_performed) {
+ gc_performed = false;
+ if (!new_space->ReserveSpace(new_space_size)) {
+ Heap::CollectGarbage(new_space_size, NEW_SPACE);
+ gc_performed = true;
+ }
+ if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
+ Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
+ gc_performed = true;
+ }
+ if (!(old_data_space->ReserveSpace(data_space_size))) {
+ Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
+ gc_performed = true;
+ }
+ if (!(code_space->ReserveSpace(code_space_size))) {
+ Heap::CollectGarbage(code_space_size, CODE_SPACE);
+ gc_performed = true;
+ }
+ if (!(map_space->ReserveSpace(map_space_size))) {
+ Heap::CollectGarbage(map_space_size, MAP_SPACE);
+ gc_performed = true;
+ }
+ if (!(cell_space->ReserveSpace(cell_space_size))) {
+ Heap::CollectGarbage(cell_space_size, CELL_SPACE);
+ gc_performed = true;
+ }
+ // We add a slack-factor of 2 in order to have space for the remembered
+ // set and a series of large-object allocations that are only just larger
+ // than the page size.
+ large_object_size *= 2;
+ // The ReserveSpace method on the large object space checks how much
+ // we can expand the old generation. This includes expansion caused by
+ // allocation in the other spaces.
+ large_object_size += cell_space_size + map_space_size + code_space_size +
+ data_space_size + pointer_space_size;
+ if (!(lo_space->ReserveSpace(large_object_size))) {
+ Heap::CollectGarbage(large_object_size, LO_SPACE);
+ gc_performed = true;
+ }
+ }
+}
+
+
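ReserveSpace keeps collecting any space that cannot satisfy its request until one full pass succeeds, so on return all seven reservations hold at once. A hedged sketch of the intended calling pattern for partial snapshots (the byte counts are illustrative only, not values used by v8):

    // Hypothetical deserializer prologue: after this returns, the
    // amounts below can be allocated with linear allocation alone,
    // with no GC in the middle of reading the snapshot.
    Heap::ReserveSpace(64 * KB,    // new space
                       512 * KB,   // old pointer space
                       256 * KB,   // old data space
                       512 * KB,   // code space
                       64 * KB,    // map space
                       32 * KB,    // cell space
                       128 * KB);  // large objects (padded internally)
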
void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -576,6 +635,8 @@
Top::MarkCompactPrologue(is_compacting);
ThreadManager::MarkCompactPrologue(is_compacting);
+
+ if (is_compacting) FlushNumberStringCache();
}
@@ -733,7 +794,7 @@
ScavengeVisitor scavenge_visitor;
// Copy roots.
- IterateRoots(&scavenge_visitor, VISIT_ALL);
+ IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
@@ -753,6 +814,64 @@
}
}
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+ ScavengeExternalStringTable();
+ ASSERT(new_space_front == new_space_.top());
+
+ // Set age mark.
+ new_space_.set_age_mark(new_space_.top());
+
+ // Update how much has survived scavenge.
+ survived_since_last_expansion_ +=
+ (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+
+ LOG(ResourceEvent("scavenge", "end"));
+
+ gc_state_ = NOT_IN_GC;
+}
+
+
+void Heap::ScavengeExternalStringTable() {
+ ExternalStringTable::Verify();
+
+ if (ExternalStringTable::new_space_strings_.is_empty()) return;
+
+ Object** start = &ExternalStringTable::new_space_strings_[0];
+ Object** end = start + ExternalStringTable::new_space_strings_.length();
+ Object** last = start;
+
+ for (Object** p = start; p < end; ++p) {
+ ASSERT(Heap::InFromSpace(*p));
+ MapWord first_word = HeapObject::cast(*p)->map_word();
+
+ if (!first_word.IsForwardingAddress()) {
+ // Unreachable external string can be finalized.
+ FinalizeExternalString(String::cast(*p));
+ continue;
+ }
+
+ // String is still reachable.
+ String* target = String::cast(first_word.ToForwardingAddress());
+ ASSERT(target->IsExternalString());
+
+ if (Heap::InNewSpace(target)) {
+ // String is still in new space. Update the table entry.
+ *last = target;
+ ++last;
+ } else {
+ // String got promoted. Move it to the old string list.
+ ExternalStringTable::AddOldString(target);
+ }
+ }
+
+ ASSERT(last <= end);
+ ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
+}
+
+
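ScavengeExternalStringTable uses the usual read/write-cursor compaction: survivors are copied down over dead or promoted entries and the list is truncated to the write cursor. The same idiom on a plain vector:

    #include <cstddef>
    #include <vector>

    // Keep elements satisfying 'keep', preserving order, then truncate;
    // the counterpart of the 'last' cursor and ShrinkNewStrings above.
    template <typename T, typename Pred>
    void CompactInPlace(std::vector<T>* v, Pred keep) {
      std::size_t last = 0;
      for (std::size_t i = 0; i < v->size(); ++i) {
        if (keep((*v)[i])) (*v)[last++] = (*v)[i];
      }
      v->resize(last);
    }
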
+Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
+ Address new_space_front) {
do {
ASSERT(new_space_front <= new_space_.top());
@@ -761,7 +880,7 @@
// queue is empty.
while (new_space_front < new_space_.top()) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- object->Iterate(&scavenge_visitor);
+ object->Iterate(scavenge_visitor);
new_space_front += object->Size();
}
@@ -783,7 +902,7 @@
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
- target->Iterate(&scavenge_visitor);
+ target->Iterate(scavenge_visitor);
UpdateRSet(target);
}
@@ -791,16 +910,7 @@
// (there are currently no more unswept promoted objects).
} while (new_space_front < new_space_.top());
- // Set age mark.
- new_space_.set_age_mark(new_space_.top());
-
- // Update how much has survived scavenge.
- survived_since_last_expansion_ +=
- (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
-
- LOG(ResourceEvent("scavenge", "end"));
-
- gc_state_ = NOT_IN_GC;
+ return new_space_front;
}
@@ -1094,6 +1204,13 @@
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(0);
+
+  // If the map object has alignment padding, fill the padding with Smi 0.
+ if (Map::kPadStart < Map::kSize) {
+ memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
+ 0,
+ Map::kSize - Map::kPadStart);
+ }
return map;
}
@@ -1299,9 +1416,6 @@
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1521,10 +1635,7 @@
CreateFixedStubs();
- // Allocate the number->string conversion cache
- obj = AllocateFixedArray(kNumberStringCacheSize * 2);
- if (obj->IsFailure()) return false;
- set_number_string_cache(FixedArray::cast(obj));
+ if (InitializeNumberStringCache()->IsFailure()) return false;
// Allocate cache for single character strings.
obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
@@ -1555,25 +1666,45 @@
}
+Object* Heap::InitializeNumberStringCache() {
+ // Compute the size of the number string cache based on the max heap size.
+ // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
+ // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
+ int number_string_cache_size = max_semispace_size_ / 512;
+ number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
+ Object* obj = AllocateFixedArray(number_string_cache_size * 2);
+ if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
+ return obj;
+}
+
+
+void Heap::FlushNumberStringCache() {
+ // Flush the number to string cache.
+ int len = number_string_cache()->length();
+ for (int i = 0; i < len; i++) {
+ number_string_cache()->set_undefined(i);
+ }
+}
+
+
static inline int double_get_hash(double d) {
DoubleRepresentation rep(d);
- return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
- (Heap::kNumberStringCacheSize - 1));
+ return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}
static inline int smi_get_hash(Smi* smi) {
- return (smi->value() & (Heap::kNumberStringCacheSize - 1));
+ return smi->value();
}
-
Object* Heap::GetNumberStringCache(Object* number) {
int hash;
+ int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number));
+ hash = smi_get_hash(Smi::cast(number)) & mask;
} else {
- hash = double_get_hash(number->Number());
+ hash = double_get_hash(number->Number()) & mask;
}
Object* key = number_string_cache()->get(hash * 2);
if (key == number) {
@@ -1589,11 +1720,12 @@
void Heap::SetNumberStringCache(Object* number, String* string) {
int hash;
+ int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number));
+ hash = smi_get_hash(Smi::cast(number)) & mask;
number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
} else {
- hash = double_get_hash(number->Number());
+ hash = double_get_hash(number->Number()) & mask;
number_string_cache()->set(hash * 2, number);
}
number_string_cache()->set(hash * 2 + 1, string);
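
With the cache length now chosen at startup, the index mask is derived from the array rather than a compile-time constant. A standalone sketch of the layout and indexing: keys live at even slots, cached strings at the following odd slot, and len/2 must be a power of two for the mask to be valid.

    #include <cstdint>
    #include <cstring>

    // 'len' is the FixedArray length, i.e. twice the entry count.
    // Returns the slot of the key; the cached string sits at slot + 1.
    int CacheKeySlot(double d, int len) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // like DoubleRepresentation
      int hash = static_cast<int>(bits) ^ static_cast<int>(bits >> 32);
      int mask = (len >> 1) - 1;            // len/2 is a power of two
      return (hash & mask) * 2;
    }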
@@ -1707,7 +1839,6 @@
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
@@ -1847,8 +1978,7 @@
Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
- Object* result = Allocate(map,
- always_allocate() ? OLD_POINTER_SPACE : NEW_SPACE);
+ Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ConsString* cons_string = ConsString::cast(result);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
@@ -1912,8 +2042,7 @@
}
Map* map = external_ascii_string_map();
- Object* result = Allocate(map,
- always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
+ Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
@@ -1934,8 +2063,7 @@
}
Map* map = Heap::external_string_map();
- Object* result = Allocate(map,
- always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
+ Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
@@ -1970,15 +2098,16 @@
Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
if (pretenure == NOT_TENURED) {
return AllocateByteArray(length);
}
int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- size > MaxObjectSizeInPagedSpace() ? LO_SPACE : OLD_DATA_SPACE;
-
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
-
+ Object* result = (size <= MaxObjectSizeInPagedSpace())
+ ? old_data_space_->AllocateRaw(size)
+ : lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
@@ -1988,15 +2117,13 @@
Object* Heap::AllocateByteArray(int length) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
int size = ByteArray::SizeFor(length);
AllocationSpace space =
- size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = LO_SPACE;
-
+ (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
-
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
@@ -2021,12 +2148,7 @@
uint8_t* external_pointer,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
-
if (result->IsFailure()) return result;
reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
@@ -2042,14 +2164,9 @@
void* external_pointer,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
Object* result = AllocateRaw(ExternalArray::kAlignedSize,
space,
OLD_DATA_SPACE);
-
if (result->IsFailure()) return result;
reinterpret_cast<ExternalArray*>(result)->set_map(
@@ -2138,9 +2255,12 @@
Object* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
- Object* result = AllocateRaw(map->instance_size(),
- space,
- TargetSpaceId(map->instance_type()));
+ // If allocation failures are disallowed, we may allocate in a different
+ // space when new space is full and the object is not a large object.
+ AllocationSpace retry_space =
+ (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+ Object* result =
+ AllocateRaw(map->instance_size(), space, retry_space);
if (result->IsFailure()) return result;
HeapObject::cast(result)->set_map(map);
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -2185,8 +2305,11 @@
Object* Heap::AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
- Object* prototype) {
- Object* result = Allocate(function_map, OLD_POINTER_SPACE);
+ Object* prototype,
+ PretenureFlag pretenure) {
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
+ Object* result = Allocate(function_map, space);
if (result->IsFailure()) return result;
return InitializeFunction(JSFunction::cast(result), shared, prototype);
}
@@ -2203,10 +2326,14 @@
JSObject* boilerplate =
Top::context()->global_context()->arguments_boilerplate();
- // Make the clone.
- Map* map = boilerplate->map();
- int object_size = map->instance_size();
- Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+ // Check that the size of the boilerplate matches our
+ // expectations. The ArgumentsAccessStub::GenerateNewObject relies
+ // on the size being a known constant.
+ ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
+
+ // Do the allocation.
+ Object* result =
+ AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
// Copy the content. The arguments boilerplate doesn't have any
@@ -2214,7 +2341,7 @@
// barrier here.
CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
reinterpret_cast<Object**>(boilerplate->address()),
- object_size);
+ kArgumentsObjectSize);
// Set the two properties.
JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
@@ -2321,7 +2448,6 @@
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
- if (always_allocate()) space = OLD_POINTER_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@@ -2596,12 +2722,16 @@
Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
uint32_t hash_field) {
+ ASSERT(chars >= 0);
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
// Determine whether the string is ascii.
bool is_ascii = true;
- while (buffer->has_more() && is_ascii) {
- if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
+ while (buffer->has_more()) {
+ if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
+ is_ascii = false;
+ break;
+ }
}
buffer->Rewind();
@@ -2610,17 +2740,23 @@
Map* map;
if (is_ascii) {
+ if (chars > SeqAsciiString::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
map = ascii_symbol_map();
size = SeqAsciiString::SizeFor(chars);
} else {
+ if (chars > SeqTwoByteString::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
map = symbol_map();
size = SeqTwoByteString::SizeFor(chars);
}
// Allocate string.
- AllocationSpace space =
- (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_DATA_SPACE;
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ Object* result = (size > MaxObjectSizeInPagedSpace())
+ ? lo_space_->AllocateRaw(size)
+ : old_data_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<HeapObject*>(result)->set_map(map);
@@ -2640,22 +2776,28 @@
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
+ if (length < 0 || length > SeqAsciiString::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
int size = SeqAsciiString::SizeFor(length);
+ ASSERT(size <= SeqAsciiString::kMaxSize);
- Object* result = Failure::OutOfMemoryException();
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace retry_space = OLD_DATA_SPACE;
+
if (space == NEW_SPACE) {
- result = size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size);
- } else {
- if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
- result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (size > kMaxObjectSizeInNewSpace) {
+ // Allocate in large object space, retry space will be ignored.
+ space = LO_SPACE;
+ } else if (size > MaxObjectSizeInPagedSpace()) {
+ // Allocate in new space, retry in large object space.
+ retry_space = LO_SPACE;
+ }
+ } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+ space = LO_SPACE;
}
+ Object* result = AllocateRaw(size, space, retry_space);
if (result->IsFailure()) return result;
// Partially initialize the object.
@@ -2668,22 +2810,26 @@
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
- int size = SeqTwoByteString::SizeFor(length);
-
- Object* result = Failure::OutOfMemoryException();
- if (space == NEW_SPACE) {
- result = size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size);
- } else {
- if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
- result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (length < 0 || length > SeqTwoByteString::kMaxLength) {
+ return Failure::OutOfMemoryException();
}
+ int size = SeqTwoByteString::SizeFor(length);
+ ASSERT(size <= SeqTwoByteString::kMaxSize);
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace retry_space = OLD_DATA_SPACE;
+
+ if (space == NEW_SPACE) {
+ if (size > kMaxObjectSizeInNewSpace) {
+ // Allocate in large object space, retry space will be ignored.
+ space = LO_SPACE;
+ } else if (size > MaxObjectSizeInPagedSpace()) {
+ // Allocate in new space, retry in large object space.
+ retry_space = LO_SPACE;
+ }
+ } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+ space = LO_SPACE;
+ }
+ Object* result = AllocateRaw(size, space, retry_space);
if (result->IsFailure()) return result;
// Partially initialize the object.
@@ -2707,6 +2853,9 @@
Object* Heap::AllocateRawFixedArray(int length) {
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
// Use the general function if we're forced to always allocate.
if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
@@ -2758,29 +2907,47 @@
Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+ ASSERT(length >= 0);
ASSERT(empty_fixed_array()->IsFixedArray());
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
if (length == 0) return empty_fixed_array();
- // New space can't cope with forced allocation.
- if (always_allocate()) pretenure = TENURED;
-
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
int size = FixedArray::SizeFor(length);
+ if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
+ // Too big for new space.
+ space = LO_SPACE;
+ } else if (space == OLD_POINTER_SPACE &&
+ size > MaxObjectSizeInPagedSpace()) {
+ // Too big for old pointer space.
+ space = LO_SPACE;
+ }
+
+ // Specialize allocation for the space.
Object* result = Failure::OutOfMemoryException();
- if (pretenure != TENURED) {
- result = size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
- }
- if (result->IsFailure()) {
- if (size > MaxObjectSizeInPagedSpace()) {
- result = lo_space_->AllocateRawFixedArray(size);
- } else {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- result = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ if (space == NEW_SPACE) {
+ // We cannot use Heap::AllocateRaw() because it will not properly
+ // allocate extra remembered set bits if always_allocate() is true and
+ // new space allocation fails.
+ result = new_space_.AllocateRaw(size);
+ if (result->IsFailure() && always_allocate()) {
+ if (size <= MaxObjectSizeInPagedSpace()) {
+ result = old_pointer_space_->AllocateRaw(size);
+ } else {
+ result = lo_space_->AllocateRawFixedArray(size);
+ }
}
- if (result->IsFailure()) return result;
+ } else if (space == OLD_POINTER_SPACE) {
+ result = old_pointer_space_->AllocateRaw(size);
+ } else {
+ ASSERT(space == LO_SPACE);
+ result = lo_space_->AllocateRawFixedArray(size);
}
+ if (result->IsFailure()) return result;
+
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
@@ -3175,6 +3342,11 @@
IterateStrongRoots(v, mode);
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
+ if (mode != VISIT_ALL_IN_SCAVENGE) {
+ // Scavenge collections have special processing for this.
+ ExternalStringTable::Iterate(v);
+ }
+ v->Synchronize("external_string_table");
}
@@ -3203,11 +3375,12 @@
HandleScopeImplementer::Iterate(v);
v->Synchronize("handlescope");
- // Iterate over the builtin code objects and code stubs in the heap. Note
- // that it is not strictly necessary to iterate over code objects on
- // scavenge collections. We still do it here because this same function
- // is used by the mark-sweep collector and the deserializer.
- Builtins::IterateBuiltins(v);
+ // Iterate over the builtin code objects and code stubs in the
+ // heap. Note that it is not necessary to iterate over code objects
+ // on scavenge collections.
+ if (mode != VISIT_ALL_IN_SCAVENGE) {
+ Builtins::IterateBuiltins(v);
+ }
v->Synchronize("builtins");
// Iterate over global handles.
@@ -3369,7 +3542,10 @@
if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
- map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
+ map_space_ = new MapSpace(FLAG_use_big_map_space
+ ? max_old_generation_size_
+ : (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
+ MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->Setup(NULL, 0)) return false;
@@ -3424,6 +3600,8 @@
void Heap::TearDown() {
GlobalHandles::TearDown();
+ ExternalStringTable::TearDown();
+
new_space_.TearDown();
if (old_pointer_space_ != NULL) {
@@ -3839,8 +4017,8 @@
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject() {
- search_target = NULL;
+void Heap::TracePathToObject(Object* target) {
+ search_target = target;
search_for_any_global = false;
MarkRootVisitor root_visitor;
@@ -3907,8 +4085,8 @@
int KeyedLookupCache::Hash(Map* map, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
- return (addr_hash ^ name->Hash()) % kLength;
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
+ return (addr_hash ^ name->Hash()) & kCapacityMask;
}
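
The reworked hash drops the two always-zero alignment bits of the map address before mixing, and the modulo becomes a mask now that kLength is a power of two. In isolation:

    #include <cstdint>

    int KeyedHash(uintptr_t map_addr, uint32_t name_hash) {
      const int kMapHashShift = 2;       // maps are at least 4-byte aligned
      const int kCapacityMask = 64 - 1;  // kLength == 64, a power of two
      uint32_t addr_hash = static_cast<uint32_t>(map_addr) >> kMapHashShift;
      return static_cast<int>((addr_hash ^ name_hash) & kCapacityMask);
    }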
@@ -3991,4 +4169,35 @@
}
+void ExternalStringTable::CleanUp() {
+ int last = 0;
+ for (int i = 0; i < new_space_strings_.length(); ++i) {
+ if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
+ if (Heap::InNewSpace(new_space_strings_[i])) {
+ new_space_strings_[last++] = new_space_strings_[i];
+ } else {
+ old_space_strings_.Add(new_space_strings_[i]);
+ }
+ }
+ new_space_strings_.Rewind(last);
+ last = 0;
+ for (int i = 0; i < old_space_strings_.length(); ++i) {
+ if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
+ ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
+ old_space_strings_[last++] = old_space_strings_[i];
+ }
+ old_space_strings_.Rewind(last);
+ Verify();
+}
+
+
+void ExternalStringTable::TearDown() {
+ new_space_strings_.Free();
+ old_space_strings_.Free();
+}
+
+
+List<Object*> ExternalStringTable::new_space_strings_;
+List<Object*> ExternalStringTable::old_space_strings_;
+
} } // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
index b37fe4b..1f04444 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -269,7 +269,7 @@
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
static bool linear_allocation() {
- return linear_allocation_scope_depth_ != 0;
+ return linear_allocation_scope_depth_ != 0;
}
static Address* NewSpaceAllocationTopAddress() {
@@ -487,9 +487,12 @@
// Please note this does not perform a garbage collection.
static Object* AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
- Object* prototype);
+ Object* prototype,
+ PretenureFlag pretenure = TENURED);
// Indicies for direct access into argument objects.
+ static const int kArgumentsObjectSize =
+ JSObject::kHeaderSize + 2 * kPointerSize;
static const int arguments_callee_index = 0;
static const int arguments_length_index = 1;
@@ -566,6 +569,10 @@
static Object* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
+ // Finalizes an external string by deleting the associated external
+ // data and clearing the resource pointer.
+ static inline void FinalizeExternalString(String* string);
+
// Allocates an uninitialized object. The memory is non-executable if the
// hardware and OS allow.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -778,7 +785,7 @@
return disallow_allocation_failure_;
}
- static void TracePathToObject();
+ static void TracePathToObject(Object* target);
static void TracePathToGlobal();
#endif
@@ -797,9 +804,27 @@
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
+ // Update an old object's remembered set
+ static int UpdateRSet(HeapObject* obj);
+
// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();
+ // Support for partial snapshots. After calling this we can allocate a
+ // certain number of bytes using only linear allocation (with a
+ // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
+  // or causing a GC. Garbage collections are triggered repeatedly until each
+  // space can reserve the requested amount. For paged spaces the space
+  // requested must include the space wasted
+ // at the end of each page when allocating linearly.
+ static void ReserveSpace(
+ int new_space_size,
+ int pointer_space_size,
+ int data_space_size,
+ int code_space_size,
+ int map_space_size,
+ int cell_space_size,
+ int large_object_size);
+
//
// Support for the API.
//
@@ -813,9 +838,6 @@
// Update the cache with a new number-string pair.
static void SetNumberStringCache(Object* number, String* str);
- // Entries in the cache. Must be a power of 2.
- static const int kNumberStringCacheSize = 64;
-
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
@@ -830,11 +852,15 @@
> old_gen_promotion_limit_;
}
+ static intptr_t OldGenerationSpaceAvailable() {
+ return old_gen_allocation_limit_ -
+ (PromotedSpaceSize() + PromotedExternalMemorySize());
+ }
+
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
static bool OldGenerationAllocationLimitReached() {
- return (PromotedSpaceSize() + PromotedExternalMemorySize())
- > old_gen_allocation_limit_;
+ return OldGenerationSpaceAvailable() < 0;
}
// Can be called when the embedding application is idle.
@@ -883,11 +909,6 @@
static int linear_allocation_scope_depth_;
static bool context_disposed_pending_;
- // The number of MapSpace pages is limited by the way we pack
- // Map pointers during GC.
- static const int kMaxMapSpaceSize =
- (1 << MapWord::kMapPageIndexBits) * Page::kPageSize;
-
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
#else
@@ -1039,6 +1060,9 @@
// Performs a minor collection in new generation.
static void Scavenge();
+ static void ScavengeExternalStringTable();
+ static Address DoScavenge(ObjectVisitor* scavenge_visitor,
+ Address new_space_front);
// Performs a major collection in the whole heap.
static void MarkCompact(GCTracer* tracer);
@@ -1050,9 +1074,9 @@
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
- static HeapObject* MigrateObject(HeapObject* source,
- HeapObject* target,
- int size);
+ static inline HeapObject* MigrateObject(HeapObject* source,
+ HeapObject* target,
+ int size);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
@@ -1068,9 +1092,6 @@
static void ReportStatisticsAfterGC();
#endif
- // Update an old object's remembered set
- static int UpdateRSet(HeapObject* obj);
-
// Rebuild remembered set in an old space.
static void RebuildRSets(PagedSpace* space);
@@ -1093,6 +1114,12 @@
SharedFunctionInfo* shared,
Object* prototype);
+
+ // Initializes the number to string cache based on the max semispace size.
+ static Object* InitializeNumberStringCache();
+ // Flush the number to string cache.
+ static void FlushNumberStringCache();
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1224,7 +1251,7 @@
// Space iterator for iterating over all the paged spaces of the heap:
-// Map space, old pointer space, old data space and code space.
+// Map space, old pointer space, old data space, code space and cell space.
// Returns each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
public:
@@ -1293,17 +1320,33 @@
// Clear the cache.
static void Clear();
+
+ static const int kLength = 64;
+ static const int kCapacityMask = kLength - 1;
+ static const int kMapHashShift = 2;
+
private:
static inline int Hash(Map* map, String* name);
- static const int kLength = 64;
+
+ // Get the address of the keys and field_offsets arrays. Used in
+ // generated code to perform cache lookups.
+ static Address keys_address() {
+ return reinterpret_cast<Address>(&keys_);
+ }
+
+ static Address field_offsets_address() {
+ return reinterpret_cast<Address>(&field_offsets_);
+ }
+
struct Key {
Map* map;
String* name;
};
static Key keys_[kLength];
static int field_offsets_[kLength];
-};
+ friend class ExternalReference;
+};
// Cache for mapping (array, property name) into descriptor index.
@@ -1623,6 +1666,39 @@
};
+// External strings table is a place where all external strings are
+// registered. We need to keep track of such strings to properly
+// finalize them.
+class ExternalStringTable : public AllStatic {
+ public:
+ // Registers an external string.
+ inline static void AddString(String* string);
+
+ inline static void Iterate(ObjectVisitor* v);
+
+ // Restores internal invariant and gets rid of collected strings.
+ // Must be called after each Iterate() that modified the strings.
+ static void CleanUp();
+
+ // Destroys all allocated memory.
+ static void TearDown();
+
+ private:
+ friend class Heap;
+
+ inline static void Verify();
+
+ inline static void AddOldString(String* string);
+
+ // Notifies the table that only a prefix of the new list is valid.
+ inline static void ShrinkNewStrings(int position);
+
+  // To speed up scavenge collections, new space strings are kept
+ // separate from old space strings.
+ static List<Object*> new_space_strings_;
+ static List<Object*> old_space_strings_;
+};
+
} } // namespace v8::internal
#endif // V8_HEAP_H_
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index d6f5550..2cf469a 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -575,6 +575,7 @@
void Assembler::mov_b(Register dst, const Operand& src) {
+ ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x8A);
@@ -592,6 +593,7 @@
void Assembler::mov_b(const Operand& dst, Register src) {
+ ASSERT(src.code() < 4);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x88);
@@ -752,6 +754,14 @@
}
+void Assembler::rep_movs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0xA5);
+}
+
+
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1199,6 +1209,15 @@
}
+void Assembler::subb(Register dst, const Operand& src) {
+ ASSERT(dst.code() < 4);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x2A);
+ emit_operand(dst, src);
+}
+
+
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1242,6 +1261,14 @@
}
+void Assembler::test_b(Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x84);
+ emit_operand(reg, op);
+}
+
+
void Assembler::test(const Operand& op, const Immediate& imm) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1586,7 +1613,6 @@
// FPU instructions
-
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1675,6 +1701,15 @@
}
+void Assembler::fisttp_d(const Operand& adr) {
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDD);
+ emit_operand(ecx, adr);
+}
+
+
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2004,6 +2039,17 @@
}
+void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x57);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2015,6 +2061,50 @@
}
+void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
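For reference, the byte sequences these helpers emit are the standard SSE2 encodings; the 66 prefix selects the aligned form, F3 the unaligned form, and the 6F/7F opcode picks load versus store:

    // Emitted encodings (ModR/M operand bytes follow):
    //   66 0F 6F /r   movdqa xmm, m128   aligned load (#GP if misaligned)
    //   66 0F 7F /r   movdqa m128, xmm   aligned store
    //   F3 0F 6F /r   movdqu xmm, m128   unaligned load
    //   F3 0F 7F /r   movdqu m128, xmm   unaligned store
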
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 662ebc9..d675ecf 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -540,6 +540,9 @@
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
+ // Repetitive string instructions.
+ void rep_movs();
+
// Exchange two registers
void xchg(Register dst, Register src);
@@ -614,12 +617,14 @@
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);
+ void subb(Register dst, const Operand& src);
void sub(const Operand& dst, const Immediate& x);
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
void test(Register reg, const Immediate& imm);
void test(Register reg, const Operand& op);
+ void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
void xor_(Register dst, int32_t imm32);
@@ -693,6 +698,7 @@
void fistp_d(const Operand& adr);
void fisttp_s(const Operand& adr);
+ void fisttp_d(const Operand& adr);
void fabs();
void fchs();
@@ -745,9 +751,15 @@
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src);
+ void xorpd(XMMRegister dst, XMMRegister src);
void comisd(XMMRegister dst, XMMRegister src);
+ void movdqa(XMMRegister dst, const Operand& src);
+ void movdqa(const Operand& dst, XMMRegister src);
+ void movdqu(XMMRegister dst, const Operand& src);
+ void movdqu(const Operand& dst, XMMRegister src);
+
// Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index a164cfa..2c5b1d1 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -36,15 +36,36 @@
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
- // TODO(428): Don't pass the function in a static variable.
- ExternalReference passed = ExternalReference::builtin_passed_function();
- __ mov(Operand::StaticVariable(passed), edi);
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments excluding receiver
+ // -- edi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- esi : context
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -- ...
+ // -- esp[4 * argc] : first argument (argc == eax)
+  //  -- esp[4 * (argc + 1)] : receiver
+ // -----------------------------------
- // The actual argument count has already been loaded into register
- // eax, but JumpToRuntime expects eax to contain the number of
- // arguments including the receiver.
- __ inc(eax);
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ Register scratch = ebx;
+ __ pop(scratch); // Save return address.
+ __ push(edi);
+ __ push(scratch); // Restore return address.
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToRuntime expects eax to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ add(Operand(eax), Immediate(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id));
}
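
The extra-argument insertion above is a three-instruction stack rotation; its net effect on the ia32 stack (which grows downward) can be pictured as:

    // NEEDS_CALLED_FUNCTION, before:        after:
    //   esp[0] : return address             esp[0] : return address
    //   esp[4] : last argument              esp[4] : edi (called function)
    //   ...                                 esp[8] : last argument
    //   esp[4*(argc+1)] : receiver          ...
    // eax is then incremented by num_extra_args + 1 so JumpToRuntime
    // sees the receiver plus the inserted argument in its count.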
@@ -81,12 +102,13 @@
}
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
__ push(eax);
// Push the function to invoke on the stack.
@@ -255,7 +277,7 @@
// Retrieve smi-tagged arguments count from the stack.
__ mov(eax, Operand(esp, 0));
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@@ -277,8 +299,17 @@
__ j(greater_equal, &loop);
// Call the function.
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ if (is_api_function) {
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::HandleApiCallConstruct));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, actual, CALL_FUNCTION);
+ }
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -319,6 +350,16 @@
}
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Clear the context before we push it when entering the JS frame.
@@ -440,8 +481,7 @@
__ EnterInternalFrame(); // preserves eax, ebx, edi
// Store the arguments count on the stack (smi tagged).
- ASSERT(kSmiTag == 0);
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
__ push(eax);
__ push(edi); // save edi across the call
@@ -452,7 +492,7 @@
// Get the arguments count and untag it.
__ pop(eax);
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
__ LeaveInternalFrame();
__ jmp(&patch_receiver);
@@ -472,35 +512,38 @@
__ bind(&done);
}
- // 4. Shift stuff one slot down the stack.
+ // 4. Check that the function really is a function.
+ { Label done;
+ __ test(edi, Operand(edi));
+ __ j(not_zero, &done, taken);
+ __ xor_(ebx, Operand(ebx));
+ // CALL_NON_FUNCTION will expect to find the non-function callee on the
+ // expression stack of the caller. Transfer it from receiver to the
+ // caller's expression stack (and make the first argument the receiver
+ // for CALL_NON_FUNCTION) by decrementing the argument count.
+ __ dec(eax);
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+ __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+ __ bind(&done);
+ }
+
+ // 5. Shift arguments and return address one slot down on the stack
+ // (overwriting the receiver).
{ Label loop;
- __ lea(ecx, Operand(eax, +1)); // +1 ~ copy receiver too
+ __ mov(ecx, eax);
__ bind(&loop);
__ mov(ebx, Operand(esp, ecx, times_4, 0));
__ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
__ dec(ecx);
- __ j(not_zero, &loop);
+ __ j(not_sign, &loop);
+ __ pop(ebx); // Discard copy of return address.
+ __ dec(eax); // One fewer argument (first argument is new receiver).
}
- // 5. Remove TOS (copy of last arguments), but keep return address.
- __ pop(ebx);
- __ pop(ecx);
- __ push(ebx);
- __ dec(eax);
-
- // 6. Check that function really was a function and get the code to
- // call from the function and check that the number of expected
- // arguments matches what we're providing.
- { Label invoke;
- __ test(edi, Operand(edi));
- __ j(not_zero, &invoke, taken);
- __ xor_(ebx, Operand(ebx));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
-
- __ bind(&invoke);
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ // 6. Get the code to call from the function and check that the number of
+ // expected arguments matches what we're providing.
+ { __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
@@ -631,7 +674,7 @@
// Invoke the function.
ParameterCount actual(eax);
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
__ mov(edi, Operand(ebp, 4 * kPointerSize));
__ InvokeFunction(edi, actual, CALL_FUNCTION);
@@ -828,7 +871,7 @@
// elements_array_end: start of next object
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
- __ shr(array_size, kSmiTagSize); // Convert from smi to value.
+ __ SmiUntag(array_size); // Convert from smi to value.
__ mov(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
@@ -957,7 +1000,7 @@
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
ASSERT(kSmiTag == 0);
- __ shl(eax, kSmiTagSize); // Convet argc to a smi.
+ __ SmiTag(eax); // Convert argc to a smi.
// eax: array_size (smi)
// edi: constructor
// esp[0] : argc
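Several hunks above replace open-coded shl/shr sequences with the
SmiTag/SmiUntag macro-assembler helpers. A minimal standalone sketch of the
tagging scheme those helpers assume on ia32 (kSmiTag == 0, kSmiTagSize == 1),
written as plain C++ for illustration:

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const intptr_t kSmiTag = 0;

// SmiTag: shift left by one; the low (tag) bit ends up 0.
intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}
// SmiUntag: arithmetic shift right by one recovers the value (sign kept).
int32_t SmiUntag(intptr_t smi) {
  return static_cast<int32_t>(smi >> kSmiTagSize);
}

int main() {
  assert(SmiUntag(SmiTag(42)) == 42);
  assert(SmiUntag(SmiTag(-7)) == -7);
  assert((SmiTag(42) & ((1 << kSmiTagSize) - 1)) == kSmiTag);  // tag bit is 0
  return 0;
}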
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
index 44e937a..49c706d 100644
--- a/src/ia32/codegen-ia32-inl.h
+++ b/src/ia32/codegen-ia32-inl.h
@@ -39,16 +39,6 @@
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- GenerateFastMathOp(SIN, args);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- GenerateFastMathOp(COS, args);
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 7c8ff31..240f4da 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -32,7 +32,10 @@
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
+#include "jsregexp.h"
#include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
@@ -174,12 +177,19 @@
function_return_is_shadowed_ = false;
// Allocate the local context if needed.
- if (scope_->num_heap_slots() > 0) {
+ int heap_slots = scope_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
// Get outer context and create a new context based on it.
frame_->PushFunction();
- Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
// Update context local.
frame_->SaveContextRegister();
@@ -241,6 +251,12 @@
StoreArgumentsObject(true);
}
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -593,36 +609,33 @@
frame_->Push(&result);
}
- { Reference shadow_ref(this, scope_->arguments_shadow());
- Reference arguments_ref(this, scope_->arguments());
- ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
- // Here we rely on the convenient property that references to slot
- // take up zero space in the frame (ie, it doesn't matter that the
- // stored value is actually below the reference on the frame).
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- Result arguments = frame_->Pop();
- if (arguments.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !arguments.handle()->IsTheHole();
- } else {
- __ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
- arguments.Unuse();
- done.Branch(not_equal);
- }
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has already
+ // been written to. This can happen if a function has a local
+ // variable named 'arguments'.
+ LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+ Result probe = frame_->Pop();
+ if (probe.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ done.Branch(not_equal);
}
- if (!skip_arguments) {
- arguments_ref.SetValue(NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- shadow_ref.SetValue(NOT_CONST_INIT);
}
+ if (!skip_arguments) {
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
return frame_->Pop();
}
@@ -654,15 +667,7 @@
// The expression is either a property or a variable proxy that rewrites
// to a property.
Load(property->obj());
- // We use a named reference if the key is a literal symbol, unless it is
- // a string that can be legally parsed as an integer. This is because
- // otherwise we will not get into the slow case code that handles [] on
- // String objects.
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
Load(property->key());
@@ -754,6 +759,11 @@
static void CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch);
+ // Takes the operands in edx and eax and loads them as integers in eax
+ // and ecx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
@@ -763,19 +773,27 @@
const char* GenericBinaryOpStub::GetName() {
- switch (op_) {
- case Token::ADD: return "GenericBinaryOpStub_ADD";
- case Token::SUB: return "GenericBinaryOpStub_SUB";
- case Token::MUL: return "GenericBinaryOpStub_MUL";
- case Token::DIV: return "GenericBinaryOpStub_DIV";
- case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
- case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
- case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
- case Token::SAR: return "GenericBinaryOpStub_SAR";
- case Token::SHL: return "GenericBinaryOpStub_SHL";
- case Token::SHR: return "GenericBinaryOpStub_SHR";
- default: return "GenericBinaryOpStub";
+ if (name_ != NULL) return name_;
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
}
+
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+ "GenericBinaryOpStub_%s_%s%s_%s%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "");
+ return name_;
}
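For illustration, the names produced by the new GetName follow the SNPrintF
format string above; a hypothetical standalone rendering, assuming
Token::Name(Token::ADD) returns "ADD" and the default flags are in effect:

#include <cstdio>

int main() {
  char name[100];
  // NO_OVERWRITE, smi code in stub, args on the stack, not reversed.
  std::snprintf(name, sizeof(name), "GenericBinaryOpStub_%s_%s%s_%s%s",
                "ADD", "Alloc", "", "StackArgs", "");
  std::printf("%s\n", name);  // GenericBinaryOpStub_ADD_Alloc_StackArgs
  return 0;
}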
@@ -803,14 +821,88 @@
void DeferredInlineBinaryOperation::Generate() {
+ Label done;
+ if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
+ (op_ == Token::SUB) ||
+ (op_ == Token::MUL) ||
+ (op_ == Token::DIV))) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ Label call_runtime, after_alloc_failure;
+ Label left_smi, right_smi, load_right, do_op;
+ __ test(left_, Immediate(kSmiTagMask));
+ __ j(zero, &left_smi);
+ __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, &call_runtime);
+ __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_LEFT) {
+ __ mov(dst_, left_);
+ }
+ __ jmp(&load_right);
+
+ __ bind(&left_smi);
+ __ SmiUntag(left_);
+ __ cvtsi2sd(xmm0, Operand(left_));
+ __ SmiTag(left_);
+ if (mode_ == OVERWRITE_LEFT) {
+ Label alloc_failure;
+ __ push(left_);
+ __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+ __ pop(left_);
+ }
+
+ __ bind(&load_right);
+ __ test(right_, Immediate(kSmiTagMask));
+ __ j(zero, &right_smi);
+ __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(not_equal, &call_runtime);
+ __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
+ if (mode_ == OVERWRITE_RIGHT) {
+ __ mov(dst_, right_);
+ } else if (mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ push(left_);
+ __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+ __ pop(left_);
+ }
+ __ jmp(&do_op);
+
+ __ bind(&right_smi);
+ __ SmiUntag(right_);
+ __ cvtsi2sd(xmm1, Operand(right_));
+ __ SmiTag(right_);
+ if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
+ Label alloc_failure;
+ __ push(left_);
+ __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
+ __ pop(left_);
+ }
+
+ __ bind(&do_op);
+ switch (op_) {
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
+ default: UNREACHABLE();
+ }
+ __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
+ __ jmp(&done);
+
+ __ bind(&after_alloc_failure);
+ __ pop(left_);
+ __ bind(&call_runtime);
+ }
GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
stub.GenerateCall(masm_, left_, right_);
if (!dst_.is(eax)) __ mov(dst_, eax);
+ __ bind(&done);
}
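A hypothetical scalar model of the SSE2 fast path just added: once both
operands have been loaded as doubles into xmm0 and xmm1, the four inlined
operators reduce to plain double arithmetic, and every other operator falls
through to the stub call. The enum and function below are illustration only.

#include <cassert>

enum Token { ADD, SUB, MUL, DIV };

double InlineBinaryOp(Token op, double left, double right) {
  switch (op) {
    case ADD: return left + right;   // addsd
    case SUB: return left - right;   // subsd
    case MUL: return left * right;   // mulsd
    case DIV: return left / right;   // divsd
  }
  return 0.0;  // unreachable in this sketch
}

int main() {
  assert(InlineBinaryOp(ADD, 1.5, 2.25) == 3.75);
  assert(InlineBinaryOp(DIV, 1.0, 4.0) == 0.25);
  return 0;
}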
void CodeGenerator::GenericBinaryOperation(Token::Value op,
- SmiAnalysis* type,
+ StaticType* type,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@@ -1122,8 +1214,7 @@
__ test(edx, Operand(edx));
deferred->Branch(not_zero);
// Tag the result and store it in the quotient register.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -1183,8 +1274,8 @@
// Untag both operands.
__ mov(answer.reg(), left->reg());
- __ sar(answer.reg(), kSmiTagSize);
- __ sar(ecx, kSmiTagSize);
+ __ SmiUntag(answer.reg());
+ __ SmiUntag(ecx);
// Perform the operation.
switch (op) {
case Token::SAR:
@@ -1206,8 +1297,7 @@
// in a case where it is dropped anyway.
__ test(answer.reg(), Immediate(0xc0000000));
__ j(zero, &result_ok);
- ASSERT(kSmiTag == 0);
- __ shl(ecx, kSmiTagSize);
+ __ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
break;
@@ -1218,8 +1308,7 @@
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok);
- ASSERT(kSmiTag == 0);
- __ shl(ecx, kSmiTagSize);
+ __ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
break;
@@ -1228,9 +1317,7 @@
UNREACHABLE();
}
// Smi-tag the result in answer.
- ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
- __ lea(answer.reg(),
- Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ __ SmiTag(answer.reg());
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -1280,7 +1367,7 @@
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign).
// Left-hand operand has been copied into answer.
- __ sar(answer.reg(), kSmiTagSize);
+ __ SmiUntag(answer.reg());
// Do multiplication of smis, leaving result in answer.
__ imul(answer.reg(), Operand(right->reg()));
// Go slow on overflows.
@@ -1491,7 +1578,7 @@
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
- SmiAnalysis* type,
+ StaticType* type,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
@@ -1627,7 +1714,7 @@
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ mov(answer.reg(), operand->reg());
- __ sar(answer.reg(), kSmiTagSize);
+ __ SmiUntag(answer.reg());
__ shr(answer.reg(), shift_value);
// A negative Smi shifted right two is in the positive Smi range.
if (shift_value < 2) {
@@ -1635,9 +1722,7 @@
deferred->Branch(not_zero);
}
operand->Unuse();
- ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
- __ lea(answer.reg(),
- Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ __ SmiTag(answer.reg());
deferred->BindExit();
frame_->Push(&answer);
}
@@ -1645,9 +1730,42 @@
case Token::SHL:
if (reversed) {
- Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ Result right;
+ Result right_copy_in_ecx;
+
+ // Make sure to get a copy of the right operand into ecx. This
+ // allows us to modify it without having to restore it in the
+ // deferred code.
+ operand->ToRegister();
+ if (operand->reg().is(ecx)) {
+ right = allocator()->Allocate();
+ __ mov(right.reg(), ecx);
+ frame_->Spill(ecx);
+ right_copy_in_ecx = *operand;
+ } else {
+ right_copy_in_ecx = allocator()->Allocate(ecx);
+ __ mov(ecx, operand->reg());
+ right = *operand;
+ }
+ operand->Unuse();
+
+ Result answer = allocator()->Allocate();
+ DeferredInlineSmiOperationReversed* deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ right.reg(),
+ overwrite_mode);
+ __ mov(answer.reg(), Immediate(int_value));
+ __ sar(ecx, kSmiTagSize);
+ deferred->Branch(carry);
+ __ shl_cl(answer.reg());
+ __ cmp(answer.reg(), 0xc0000000);
+ deferred->Branch(sign);
+ __ SmiTag(answer.reg());
+
+ deferred->BindExit();
+ frame_->Push(&answer);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1776,7 +1894,15 @@
}
-void CodeGenerator::Comparison(Condition cc,
+static bool CouldBeNaN(const Result& result) {
+ if (!result.is_constant()) return true;
+ if (!result.handle()->IsHeapNumber()) return false;
+ return isnan(HeapNumber::cast(*result.handle())->value());
+}
+
+
+void CodeGenerator::Comparison(AstNode* node,
+ Condition cc,
bool strict,
ControlDestination* dest) {
// Strict only makes sense for equality comparisons.
@@ -1795,15 +1921,28 @@
}
ASSERT(cc == less || cc == equal || cc == greater_equal);
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi =
- left_side.is_constant() && left_side.handle()->IsSmi();
- bool right_side_constant_smi =
- right_side.is_constant() && right_side.handle()->IsSmi();
- bool left_side_constant_null =
- left_side.is_constant() && left_side.handle()->IsNull();
- bool right_side_constant_null =
- right_side.is_constant() && right_side.handle()->IsNull();
+ // If either side is a constant of some sort, we can probably optimize the
+ // comparison.
+ bool left_side_constant_smi = false;
+ bool left_side_constant_null = false;
+ bool left_side_constant_1_char_string = false;
+ if (left_side.is_constant()) {
+ left_side_constant_smi = left_side.handle()->IsSmi();
+ left_side_constant_null = left_side.handle()->IsNull();
+ left_side_constant_1_char_string =
+ (left_side.handle()->IsString() &&
+ (String::cast(*left_side.handle())->length() == 1));
+ }
+ bool right_side_constant_smi = false;
+ bool right_side_constant_null = false;
+ bool right_side_constant_1_char_string = false;
+ if (right_side.is_constant()) {
+ right_side_constant_smi = right_side.handle()->IsSmi();
+ right_side_constant_null = right_side.handle()->IsNull();
+ right_side_constant_1_char_string =
+ (right_side.handle()->IsString() &&
+ (String::cast(*right_side.handle())->length() == 1));
+ }
if (left_side_constant_smi || right_side_constant_smi) {
if (left_side_constant_smi && right_side_constant_smi) {
@@ -1823,7 +1962,8 @@
default:
UNREACHABLE();
}
- } else { // Only one side is a constant Smi.
+ } else {
+ // Only one side is a constant Smi.
// If left side is a constant Smi, reverse the operands.
// Since one side is a constant Smi, conversion order does not matter.
if (left_side_constant_smi) {
@@ -1837,6 +1977,8 @@
// Implement comparison against a constant Smi, inlining the case
// where both sides are Smis.
left_side.ToRegister();
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
// Here we split control flow to the stub call and inlined cases
// before finally splitting it to the control destination. We use
@@ -1844,13 +1986,52 @@
// the first split. We manually handle the off-frame references
// by reconstituting them on the non-fall-through path.
JumpTarget is_smi;
- Register left_reg = left_side.reg();
- Handle<Object> right_val = right_side.handle();
__ test(left_side.reg(), Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
+ bool is_for_loop_compare = (node->AsCompareOperation() != NULL)
+ && node->AsCompareOperation()->is_for_loop_condition();
+ if (!is_for_loop_compare
+ && CpuFeatures::IsSupported(SSE2)
+ && right_val->IsSmi()) {
+ // Right side is a constant smi and left side has been checked
+ // not to be a smi.
+ CpuFeatures::Scope use_sse2(SSE2);
+ JumpTarget not_number;
+ __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ not_number.Branch(not_equal, &left_side);
+ __ movdbl(xmm1,
+ FieldOperand(left_reg, HeapNumber::kValueOffset));
+ int value = Smi::cast(*right_val)->value();
+ if (value == 0) {
+ __ xorpd(xmm0, xmm0);
+ } else {
+ Result temp = allocator()->Allocate();
+ __ mov(temp.reg(), Immediate(value));
+ __ cvtsi2sd(xmm0, Operand(temp.reg()));
+ temp.Unuse();
+ }
+ __ comisd(xmm1, xmm0);
+ // Jump to builtin for NaN.
+ not_number.Branch(parity_even, &left_side);
+ left_side.Unuse();
+ Condition double_cc = cc;
+ switch (cc) {
+ case less: double_cc = below; break;
+ case equal: double_cc = equal; break;
+ case less_equal: double_cc = below_equal; break;
+ case greater: double_cc = above; break;
+ case greater_equal: double_cc = above_equal; break;
+ default: UNREACHABLE();
+ }
+ dest->true_target()->Branch(double_cc);
+ dest->false_target()->Jump();
+ not_number.Bind(&left_side);
+ }
+
// Setup and call the compare stub.
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
__ cmp(result.reg(), 0);
@@ -1872,6 +2053,7 @@
right_side.Unuse();
dest->Split(cc);
}
+
} else if (cc == equal &&
(left_side_constant_null || right_side_constant_null)) {
// To make null checks efficient, we check if either the left side or
@@ -1908,17 +2090,153 @@
operand.Unuse();
dest->Split(not_zero);
}
- } else { // Neither side is a constant Smi or null.
+ } else if (left_side_constant_1_char_string ||
+ right_side_constant_1_char_string) {
+ if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
+ // Trivial case, comparing two constants.
+ int left_value = String::cast(*left_side.handle())->Get(0);
+ int right_value = String::cast(*right_side.handle())->Get(0);
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant 1 character string.
+ // If left side is a constant 1-character string, reverse the operands.
+ // Since one side is a constant string, conversion order does not matter.
+ if (left_side_constant_1_char_string) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant string, inlining the case
+ // where both sides are strings.
+ left_side.ToRegister();
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_not_string, is_string;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
+ __ test(left_side.reg(), Immediate(kSmiTagMask));
+ is_not_string.Branch(zero, &left_side);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(left_side.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(),
+ FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ // If we are testing for equality then make use of the symbol shortcut.
+ // Check if the left hand side has the same type as the right hand
+ // side (which is always a symbol).
+ if (cc == equal) {
+ Label not_a_symbol;
+ ASSERT(kSymbolTag != 0);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
+ __ j(zero, &not_a_symbol);
+ // They are symbols, so do identity compare.
+ __ cmp(left_side.reg(), right_side.handle());
+ dest->true_target()->Branch(equal);
+ dest->false_target()->Branch(not_equal);
+ __ bind(&not_a_symbol);
+ }
+ // If the receiver is not a string of the type we handle call the stub.
+ __ and_(temp.reg(),
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
+ temp.Unuse();
+ is_string.Branch(equal, &left_side);
+
+ // Setup and call the compare stub.
+ is_not_string.Bind(&left_side);
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ cmp(result.reg(), 0);
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_string.Bind(&left_side);
+ // Here we know we have a sequential ASCII string.
+ left_side = Result(left_reg);
+ right_side = Result(right_val);
+ Result temp2 = allocator_->Allocate();
+ ASSERT(temp2.is_valid());
+ // Test string equality and comparison.
+ if (cc == equal) {
+ Label comparison_done;
+ __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Immediate(1));
+ __ j(not_equal, &comparison_done);
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+ __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+ char_value);
+ __ bind(&comparison_done);
+ } else {
+ __ mov(temp2.reg(),
+ FieldOperand(left_side.reg(), String::kLengthOffset));
+ __ sub(Operand(temp2.reg()), Immediate(1));
+ Label comparison;
+ // If the length is 0 then our subtraction gave -1 which compares less
+ // than any character.
+ __ j(negative, &comparison);
+ // Otherwise load the first character.
+ __ movzx_b(temp2.reg(),
+ FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
+ __ bind(&comparison);
+ // Compare the first character of the string with our constant
+ // 1-character string.
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+ __ cmp(Operand(temp2.reg()), Immediate(char_value));
+ Label characters_were_different;
+ __ j(not_equal, &characters_were_different);
+ // If the first character is the same then the long string sorts after
+ // the short one.
+ __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Immediate(1));
+ __ bind(&characters_were_different);
+ }
+ temp2.Unuse();
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
+ }
+ } else {
+ // Neither side is a constant Smi or null.
// If either side is a non-smi constant, skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi());
+ NaNInformation nan_info =
+ (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
+ kBothCouldBeNaN :
+ kCantBothBeNaN;
left_side.ToRegister();
right_side.ToRegister();
if (known_non_smi) {
// When non-smi, call out to the compare stub.
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, nan_info);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
if (cc == equal) {
__ test(answer.reg(), Operand(answer.reg()));
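A hedged scalar model of the inlined one-character-string comparison added
above, restricted to the cc == less case with a sequential ASCII left
operand: compare the first character, and when the first characters match,
any string of length one or more sorts at or after the one-character
constant. The helper name below is hypothetical.

#include <cassert>
#include <string>

bool LessThanOneCharConstant(const std::string& lhs, char rhs) {
  if (lhs.empty()) return true;            // length - 1 == -1: below any char
  if (lhs[0] != rhs) return lhs[0] < rhs;  // first characters differ
  return false;                            // same first char: lhs sorts >= rhs
}

int main() {
  assert(LessThanOneCharConstant("", 'a'));
  assert(LessThanOneCharConstant("Z", 'a'));    // 'Z' < 'a' in ASCII
  assert(!LessThanOneCharConstant("ab", 'a'));  // longer sorts after
  return 0;
}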
@@ -1945,7 +2263,7 @@
temp.Unuse();
is_smi.Branch(zero, taken);
// When non-smi, call out to the compare stub.
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, nan_info);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
if (cc == equal) {
__ test(answer.reg(), Operand(answer.reg()));
@@ -1971,6 +2289,7 @@
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
int position) {
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
@@ -1983,7 +2302,7 @@
// Use the shared code stub to call the function.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, flags);
Result answer = frame_->CallStub(&call_function, arg_count + 1);
// Restore context and replace function on the stack with the
// result of the stub invocation.
@@ -2105,7 +2424,7 @@
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
__ mov(ecx, Operand(eax));
__ cmp(eax, kArgumentsLimit);
build_args.Branch(above);
@@ -2153,7 +2472,7 @@
frame_->Push(&fn);
frame_->Push(&a1);
frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP);
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
Result res = frame_->CallStub(&call_function, 3);
frame_->Push(&res);
@@ -2575,7 +2894,7 @@
// Compare and branch to the body if true or the next test if
// false. Prefer the next test as a fall through.
ControlDestination dest(clause->body_target(), &next_test, false);
- Comparison(equal, true, &dest);
+ Comparison(node, equal, true, &dest);
// If the comparison fell through to the true target, jump to the
// actual body.
@@ -3132,7 +3451,7 @@
frame_->EmitPush(eax); // <- slot 3
frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@@ -3144,7 +3463,7 @@
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -3260,13 +3579,9 @@
frame_->EmitPush(eax);
// Store the caught exception in the catch variable.
- { Reference ref(this, node->catch_var());
- ASSERT(ref.is_slot());
- // Load the exception to the top of the stack. Here we make use of the
- // convenient property that it doesn't matter whether a value is
- // immediately on top of or underneath a zero-sized reference.
- ref.SetValue(NOT_CONST_INIT);
- }
+ Variable* catch_var = node->catch_var()->var();
+ ASSERT(catch_var != NULL && catch_var->slot() != NULL);
+ StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -3585,18 +3900,28 @@
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
- // Call the runtime to instantiate the function boilerplate object.
+ ASSERT(boilerplate->IsBoilerplate());
+
// The inevitable call will sync frame elements to memory anyway, so
// we do it eagerly to allow us to push the arguments directly into
// place.
- ASSERT(boilerplate->IsBoilerplate());
frame_->SyncRange(0, frame_->element_count() - 1);
- // Create a new closure.
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(boilerplate));
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(&result);
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+ FastNewClosureStub stub;
+ frame_->EmitPush(Immediate(boilerplate));
+ Result answer = frame_->CallStub(&stub, 1);
+ frame_->Push(&answer);
+ } else {
+ // Call the runtime to instantiate the function boilerplate
+ // object.
+ frame_->EmitPush(esi);
+ frame_->EmitPush(Immediate(boilerplate));
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
+ }
}
@@ -4094,46 +4419,10 @@
}
-// Materialize the object literal 'node' in the literals array
-// 'literals' of the function. Leave the object boilerplate in
-// 'boilerplate'.
-class DeferredObjectLiteral: public DeferredCode {
- public:
- DeferredObjectLiteral(Register boilerplate,
- Register literals,
- ObjectLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredObjectLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ObjectLiteral* node_;
-};
-
-
-void DeferredObjectLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
- // Constant properties (2).
- __ push(Immediate(node_->constant_properties()));
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -4143,32 +4432,18 @@
// Load the literals array of the function.
__ mov(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredObjectLiteral* deferred =
- new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the boilerplate object.
- frame_->Push(&boilerplate);
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ // Literal array.
+ frame_->Push(&literals);
+ // Literal index.
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ // Constant properties.
+ frame_->Push(node->constant_properties());
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- Result clone = frame_->CallRuntime(clone_function_id, 1);
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
for (int i = 0; i < node->properties()->length(); i++) {
@@ -4228,45 +4503,10 @@
}
-// Materialize the array literal 'node' in the literals array 'literals'
-// of the function. Leave the array boilerplate in 'boilerplate'.
-class DeferredArrayLiteral: public DeferredCode {
- public:
- DeferredArrayLiteral(Register boilerplate,
- Register literals,
- ArrayLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredArrayLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ArrayLiteral* node_;
-};
-
-
-void DeferredArrayLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
- // Constant properties (2).
- __ push(Immediate(node_->literals()));
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -4277,36 +4517,24 @@
__ mov(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredArrayLiteral* deferred =
- new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the resulting array literal boilerplate on the stack.
- frame_->Push(&boilerplate);
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ frame_->Push(&literals);
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ frame_->Push(node->constant_elements());
+ int length = node->values()->length();
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+ } else {
+ FastCloneShallowArrayStub stub(length);
+ clone = frame_->CallStub(&stub, 3);
}
- Result clone = frame_->CallRuntime(clone_function_id, 1);
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
// Generate code to set the elements in the array that are not
// literals.
- for (int i = 0; i < node->values()->length(); i++) {
+ for (int i = 0; i < length; i++) {
Expression* value = node->values()->at(i);
// If value is a literal the property value is already set in the
@@ -4502,27 +4730,24 @@
frame_->Push(Factory::undefined_value());
}
+ // Push the receiver.
+ frame_->PushParameterAt(-1);
+
// Resolve the call.
Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ mov(result.reg(),
- FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
- frame_->SetElementAt(arg_count, &result);
+ // The runtime call returns a pair of values in eax (function) and
+ // edx (receiver). Touch up the stack with the right values.
+ Result receiver = allocator_->Allocate(edx);
+ frame_->SetElementAt(arg_count + 1, &result);
+ frame_->SetElementAt(arg_count, &receiver);
+ receiver.Unuse();
// Call the function.
CodeForSourcePosition(node->position());
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
result = frame_->CallStub(&call_function, arg_count + 1);
// Restore the context and overwrite the function on the stack with
@@ -4535,9 +4760,6 @@
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(var->name());
-
// Pass the global object as the receiver and let the IC stub
// patch the stack to use the global proxy as 'this' in the
// invoked function.
@@ -4549,14 +4771,16 @@
Load(args->at(i));
}
+ // Push the name of the function onto the frame.
+ frame_->Push(var->name());
+
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
arg_count,
loop_nesting());
frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
@@ -4583,7 +4807,7 @@
frame_->EmitPush(edx);
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
} else if (property != NULL) {
// Check if the key is a literal string.
@@ -4609,8 +4833,7 @@
node->position());
} else {
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(name);
+ // Push the receiver onto the frame.
Load(property->obj());
// Load the arguments.
@@ -4619,14 +4842,16 @@
Load(args->at(i));
}
+ // Push the name of the function onto the frame.
+ frame_->Push(name);
+
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result =
frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
loop_nesting());
frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ frame_->Push(&result);
}
} else {
@@ -4648,7 +4873,7 @@
}
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
}
} else {
@@ -4663,7 +4888,7 @@
LoadGlobalReceiver();
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
}
}
@@ -4817,7 +5042,7 @@
__ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
__ j(not_zero, &slow_case);
// Untag the index.
- __ sar(index.reg(), kSmiTagSize);
+ __ SmiUntag(index.reg());
__ bind(&try_again_with_new_string);
// Fetch the instance type of the receiver into ecx.
@@ -4860,8 +5085,7 @@
times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- ASSERT(kSmiTag == 0);
- __ shl(temp.reg(), kSmiTagSize);
+ __ SmiTag(temp.reg());
__ jmp(&end);
// Handle non-flat strings.
@@ -5193,75 +5417,6 @@
}
-void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- JumpTarget done;
- JumpTarget call_runtime;
- ASSERT(args->length() == 1);
-
- // Load number and duplicate it.
- Load(args->at(0));
- frame_->Dup();
-
- // Get the number into an unaliased register and load it onto the
- // floating point stack still leaving one copy on the frame.
- Result number = frame_->Pop();
- number.ToRegister();
- frame_->Spill(number.reg());
- FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
- number.Unuse();
-
- // Perform the operation on the number.
- switch (op) {
- case SIN:
- __ fsin();
- break;
- case COS:
- __ fcos();
- break;
- }
-
- // Go slow case if argument to operation is out of range.
- Result eax_reg = allocator_->Allocate(eax);
- ASSERT(eax_reg.is_valid());
- __ fnstsw_ax();
- __ sahf();
- eax_reg.Unuse();
- call_runtime.Branch(parity_even, not_taken);
-
- // Allocate heap number for result if possible.
- Result scratch1 = allocator()->Allocate();
- Result scratch2 = allocator()->Allocate();
- Result heap_number = allocator()->Allocate();
- __ AllocateHeapNumber(heap_number.reg(),
- scratch1.reg(),
- scratch2.reg(),
- call_runtime.entry_label());
- scratch1.Unuse();
- scratch2.Unuse();
-
- // Store the result in the allocated heap number.
- __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
- // Replace the extra copy of the argument with the result.
- frame_->SetElementAt(0, &heap_number);
- done.Jump();
-
- call_runtime.Bind();
- // Free ST(0) which was not popped before calling into the runtime.
- __ ffree(0);
- Result answer;
- switch (op) {
- case SIN:
- answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
- break;
- case COS:
- answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
- break;
- }
- frame_->Push(&answer);
- done.Bind();
-}
-
-
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
@@ -5274,6 +5429,45 @@
}
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringCompareStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 4);
+
+ // Load the arguments on the stack and call the stub.
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+ RegExpExecStub stub;
+ Result result = frame_->CallStub(&stub, 4);
+ frame_->Push(&result);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -5284,8 +5478,6 @@
Runtime::Function* function = node->function();
if (function == NULL) {
- // Prepare stack for calling JS runtime function.
- frame_->Push(node->name());
// Push the builtins object found in the current global object.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
@@ -5302,11 +5494,12 @@
if (function == NULL) {
// Call the JS runtime function.
+ frame_->Push(node->name());
Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
arg_count,
loop_nesting_);
frame_->RestoreContextRegister();
- frame_->SetElementAt(0, &answer);
+ frame_->Push(&answer);
} else {
// Call the C runtime function.
Result answer = frame_->CallRuntime(function, arg_count);
@@ -5403,12 +5596,12 @@
} else {
Load(node->expression());
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
switch (op) {
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnarySubStub stub(overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
@@ -5425,16 +5618,16 @@
__ test(operand.reg(), Immediate(kSmiTagMask));
smi_label.Branch(zero, &operand, taken);
- frame_->Push(&operand); // undo popping of TOS
- Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
- CALL_FUNCTION, 1);
-
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
+
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
__ not_(answer.reg());
__ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
+
continue_label.Bind(&answer);
frame_->Push(&answer);
break;
@@ -5816,6 +6009,8 @@
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Comment cmnt(masm_, "[ CompareOperation");
+ bool left_already_loaded = false;
+
// Get the expressions from the node.
Expression* left = node->left();
Expression* right = node->right();
@@ -5896,7 +6091,6 @@
__ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
-
} else if (check->Equals(Heap::object_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
@@ -5928,6 +6122,38 @@
destination()->Goto(false);
}
return;
+ } else if (op == Token::LT &&
+ right->AsLiteral() != NULL &&
+ right->AsLiteral()->handle()->IsHeapNumber()) {
+ Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
+ if (check->value() == 2147483648.0) { // 0x80000000.
+ Load(left);
+ left_already_loaded = true;
+ Result lhs = frame_->Pop();
+ lhs.ToRegister();
+ __ test(lhs.reg(), Immediate(kSmiTagMask));
+ destination()->true_target()->Branch(zero); // All Smis are less.
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+ __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
+ __ cmp(scratch.reg(), Factory::heap_number_map());
+ JumpTarget not_a_number;
+ not_a_number.Branch(not_equal, &lhs);
+ __ mov(scratch.reg(),
+ FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
+ __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
+ not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
+ const uint32_t borderline_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
+ scratch.Unuse();
+ lhs.Unuse();
+ destination()->true_target()->Branch(less);
+ destination()->false_target()->Jump();
+
+ not_a_number.Bind(&lhs);
+ frame_->Push(&lhs);
+ }
}
Condition cc = no_condition;
@@ -5952,14 +6178,14 @@
cc = greater_equal;
break;
case Token::IN: {
- Load(left);
+ if (!left_already_loaded) Load(left);
Load(right);
Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
frame_->Push(&answer); // push the result
return;
}
case Token::INSTANCEOF: {
- Load(left);
+ if (!left_already_loaded) Load(left);
Load(right);
InstanceofStub stub;
Result answer = frame_->CallStub(&stub, 2);
@@ -5972,9 +6198,9 @@
default:
UNREACHABLE();
}
- Load(left);
+ if (!left_already_loaded) Load(left);
Load(right);
- Comparison(cc, strict, destination());
+ Comparison(node, cc, strict, destination());
}
@@ -6314,7 +6540,7 @@
// Shift the key to get the actual index value and check that
// it is within bounds.
__ mov(index.reg(), key.reg());
- __ sar(index.reg(), kSmiTagSize);
+ __ SmiUntag(index.reg());
__ cmp(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
@@ -6428,7 +6654,7 @@
// a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
- SmiAnalysis* key_smi_analysis = property->key()->type();
+ StaticType* key_smi_analysis = property->key()->type();
if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
Comment cmnt(masm, "[ Inlined store to keyed Property");
@@ -6529,6 +6755,142 @@
}
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Clone the boilerplate in new space. Set the context to the
+ // current context in esi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the boilerplate function from the stack.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
+ __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+
+ // Clone the rest of the boilerplate fields. We don't have to update
+ // the write barrier because the allocated object is in new space.
+ for (int offset = kPointerSize;
+ offset < JSFunction::kSize;
+ offset += kPointerSize) {
+ if (offset == JSFunction::kContextOffset) {
+ __ mov(FieldOperand(eax, offset), esi);
+ } else {
+ __ mov(ebx, FieldOperand(edx, offset));
+ __ mov(FieldOperand(eax, offset), ebx);
+ }
+ }
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(ecx); // Temporarily remove return address.
+ __ pop(edx);
+ __ push(esi);
+ __ push(edx);
+ __ push(ecx); // Restore return address.
+ __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+}
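A hypothetical word-level model of the cloning loop in FastNewClosureStub
above: copy the boilerplate function field by field, substituting the current
context for the boilerplate's context slot (the map word is set separately
first in the real stub). The size and index constants below are illustrative
assumptions, not the actual JSFunction layout.

#include <cassert>

const int kSizeInWords = 8;   // assumed JSFunction size in words
const int kContextIndex = 3;  // assumed position of the context field

void CloneClosure(int* dst, const int* src, int current_context) {
  for (int i = 1; i < kSizeInWords; i++)  // word 0 is the freshly set map
    dst[i] = (i == kContextIndex) ? current_context : src[i];
}

int main() {
  int boilerplate[kSizeInWords] = {0, 11, 22, 99 /*old ctx*/, 44, 55, 66, 77};
  int clone[kSizeInWords] = {0};
  CloneClosure(clone, boilerplate, 1234);
  assert(clone[kContextIndex] == 1234);  // context rewritten
  assert(clone[4] == 44);                // other fields copied verbatim
  return 0;
}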
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ eax, ebx, ecx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
+ __ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
+
+ // Setup the fixed slots.
+ __ xor_(ebx, Operand(ebx)); // Set to NULL.
+ __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
+ __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
+ __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);
+
+ // Copy the global object from the surrounding context. We go through the
+ // context in the function (ecx) to match the allocation behavior we have
+ // in the runtime system (see Heap::AllocateFunctionContext).
+ __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
+ __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
+
+ // Initialize the rest of the slots to undefined.
+ __ mov(ebx, Factory::undefined_value());
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ mov(esi, Operand(eax));
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+}
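A hypothetical model of the slot initialization performed by
FastNewContextStub above: the first MIN_CONTEXT_SLOTS entries are the fixed
slots, and every remaining slot starts out undefined. The index ordering and
sentinel values below are assumptions for illustration only.

#include <cassert>
#include <vector>

enum { CLOSURE_INDEX, FCONTEXT_INDEX, PREVIOUS_INDEX, EXTENSION_INDEX,
       GLOBAL_INDEX, MIN_CONTEXT_SLOTS };

std::vector<int> NewContext(int closure, int global, int extra_slots) {
  const int kUndefined = -1;
  std::vector<int> ctx(MIN_CONTEXT_SLOTS + extra_slots, kUndefined);
  ctx[CLOSURE_INDEX] = closure;
  ctx[FCONTEXT_INDEX] = 0;   // points at the context itself in the real layout
  ctx[PREVIOUS_INDEX] = 0;   // NULL
  ctx[EXTENSION_INDEX] = 0;  // NULL
  ctx[GLOBAL_INDEX] = global;
  return ctx;
}

int main() {
  std::vector<int> ctx = NewContext(7, 9, 3);
  assert(ctx.size() == MIN_CONTEXT_SLOTS + 3);
  assert(ctx[GLOBAL_INDEX] == 9);
  assert(ctx.back() == -1);  // tail slots initialized to "undefined"
  return 0;
}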
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+ int size = JSArray::kSize + elements_size;
+
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ mov(ecx, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+ __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
+ __ cmp(ecx, Factory::undefined_value());
+ __ j(equal, &slow_case);
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+ }
+
+ if (length_ > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+ __ lea(edx, Operand(eax, JSArray::kSize));
+ __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+ // Copy the elements array.
+ for (int i = 0; i < elements_size; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(edx, i), ebx);
+ }
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
+
+ __ bind(&slow_case);
+ ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
+ __ TailCallRuntime(runtime, 3, 1);
+}
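The single allocation in FastCloneShallowArrayStub above sizes the JSArray
and its elements store together to avoid a second limit check. A standalone
model of that size computation, with the header sizes below assumed purely
for illustration:

#include <cassert>

const int kPointerSize = 4;
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length (assumed)
const int kJSArraySize = 4 * kPointerSize;           // assumed header size

int AllocationSize(int length) {
  int elements_size =
      (length > 0) ? kFixedArrayHeaderSize + length * kPointerSize : 0;
  return kJSArraySize + elements_size;
}

int main() {
  assert(AllocationSize(0) == kJSArraySize);  // empty array: no elements store
  assert(AllocationSize(3) ==
         kJSArraySize + kFixedArrayHeaderSize + 3 * kPointerSize);
  return 0;
}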
+
+
// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
@@ -6749,7 +7111,7 @@
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // adjust code below if not the case
// Remove tag from one of the operands (but keep sign).
- __ sar(eax, kSmiTagSize);
+ __ SmiUntag(eax);
// Do multiplication.
__ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
// Go slow on overflows.
@@ -6773,8 +7135,7 @@
__ test(edx, Operand(edx));
__ j(not_zero, slow);
// Tag the result and store it in register eax.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
break;
case Token::MOD:
@@ -6804,8 +7165,8 @@
// Move the second operand into register ecx.
__ mov(ecx, Operand(ebx));
// Remove tags from operands (but keep sign).
- __ sar(eax, kSmiTagSize);
- __ sar(ecx, kSmiTagSize);
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
// Perform the operation.
switch (op_) {
case Token::SAR:
@@ -6833,8 +7194,7 @@
UNREACHABLE();
}
// Tag the result and store it in register eax.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
break;
default:
@@ -6959,42 +7319,12 @@
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
- Label skip_allocation, non_smi_result, operand_conversion_failure;
-
- // Reserve space for converted numbers.
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
-
- if (use_sse3_) {
- // Truncate the operands to 32-bit integers and check for
- // exceptions in doing so.
- CpuFeatures::Scope scope(SSE3);
- __ fisttp_s(Operand(esp, 0 * kPointerSize));
- __ fisttp_s(Operand(esp, 1 * kPointerSize));
- __ fnstsw_ax();
- __ test(eax, Immediate(1));
- __ j(not_zero, &operand_conversion_failure);
- } else {
- // Check if right operand is int32.
- __ fist_s(Operand(esp, 0 * kPointerSize));
- __ fild_s(Operand(esp, 0 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
-
- // Check if left operand is int32.
- __ fist_s(Operand(esp, 1 * kPointerSize));
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
- }
-
- // Get int32 operands and perform bitop.
- __ pop(ecx);
- __ pop(eax);
+ Label non_smi_result, skip_allocation;
+ Label operand_conversion_failure;
+ FloatingPointHelper::LoadAsIntegers(
+ masm,
+ use_sse3_,
+ &operand_conversion_failure);
switch (op_) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
@@ -7014,8 +7344,7 @@
__ j(negative, &non_smi_result);
}
// Tag smi result and return.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
@@ -7040,28 +7369,20 @@
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
GenerateReturn(masm);
}
- // Clear the FPU exception flag and reset the stack before calling
- // the runtime system.
+ // Go to runtime for non-number inputs.
__ bind(&operand_conversion_failure);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- if (use_sse3_) {
- // If we've used the SSE3 instructions for truncating the
- // floating point values to integers and it failed, we have a
- // pending #IA exception. Clear it.
- __ fnclex();
- } else {
- // The non-SSE3 variant does early bailout if the right
- // operand isn't a 32-bit integer, so we may have a single
- // value on the FPU stack we need to get rid of.
- __ ffree(0);
- }
-
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
@@ -7185,6 +7506,197 @@
}
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
+// trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ bool use_sse3,
+ Label* conversion_failure) {
+ Label done, right_exponent, normal_exponent;
+ Register scratch = ebx;
+ Register scratch2 = edi;
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, Operand(ecx));
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just ORed in the implicit bit, which took care of one, and
+ // we want to use the full unsigned range, so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, big_shift_distance);
+ // Get the second half of the double.
+ __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits of the low
+ // mantissa word.
+ __ shr(ecx, 32 - big_shift_distance);
+ __ or_(ecx, Operand(scratch2));
+ // We have the answer in ecx, but we may need to negate it.
+ __ test(scratch, Operand(scratch));
+ __ j(positive, &done);
+ __ neg(ecx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ mov(ecx, Immediate(30));
+ __ sub(ecx, Operand(scratch2));
+
+ __ bind(&right_exponent);
+ // Here ecx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, shift_distance);
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits of the low
+ // mantissa word.
+ __ shr(scratch2, 32 - shift_distance);
+ __ or_(scratch2, Operand(scratch));
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to ecx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(ecx, Operand(ecx));
+ __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ mov(ecx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ sub(ecx, Operand(scratch2));
+ __ bind(&done);
+ }
+}
+
+
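
The bit twiddling in IntegerConvert is the classic manual double-to-int32
conversion: unbias the exponent, rebuild the integer from the implicit 1 plus
the mantissa, and apply the sign last. A hedged C++ equivalent of the inline
(non-SSE3) path, covering only the exponent range the stub handles before
bailing out (standalone sketch, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Truncate a double to int32 by bit manipulation, for biased exponents
    // the stub handles inline (magnitude below 2^31).
    int32_t IntegerConvertSketch(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;  // |d| < 1.0 rounds to zero
      assert(exponent <= 30);      // larger exponents take other paths
      // 52 mantissa bits plus the implicit 1.
      uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
      int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
      return (bits >> 63) ? -magnitude : magnitude;  // sign applied last
    }

    int main() {
      assert(IntegerConvertSketch(0.5) == 0);
      assert(IntegerConvertSketch(-1234567.75) == -1234567);  // truncates
      assert(IntegerConvertSketch(1073741823.0) == 1073741823);  // 2^30 - 1
      return 0;
    }
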
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(edx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm, edx, use_sse3, conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(ecx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm, eax, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
+
+
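
LoadAsIntegers implements the ToInt32 coercion the bitwise operators need:
smis untag directly, heap numbers go through IntegerConvert, undefined
becomes zero (ECMA-262, section 9.5), and anything else falls through to
conversion_failure. A rough C++ analogue of the per-operand dispatch (the
Value type here is hypothetical, purely for illustration):

    #include <cstdint>
    #include <stdexcept>

    enum class Tag { kSmi, kHeapNumber, kUndefined, kOther };
    struct Value {
      Tag tag;
      int32_t smi;    // payload when tag == kSmi
      double number;  // payload when tag == kHeapNumber
    };

    // Stand-in for IntegerConvert; assumes the value fits in int32 the way
    // the stub's exponent checks guarantee.
    int32_t DoubleToInt32(double d) { return static_cast<int32_t>(d); }

    // What the stub does for each of its two operands (edx and eax).
    int32_t LoadAsInteger(const Value& v) {
      switch (v.tag) {
        case Tag::kSmi:        return v.smi;                    // SmiUntag
        case Tag::kHeapNumber: return DoubleToInt32(v.number);  // IntegerConvert
        case Tag::kUndefined:  return 0;  // ECMA-262, section 9.5
        default:               throw std::runtime_error("conversion_failure");
      }
    }

    int main() {
      Value a{Tag::kSmi, 6, 0.0};
      Value b{Tag::kUndefined, 0, 0.0};
      return (LoadAsInteger(a) | LoadAsInteger(b)) == 6 ? 0 : 1;
    }
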
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -7195,7 +7707,7 @@
__ jmp(&done);
__ bind(&load_smi);
- __ sar(number, kSmiTagSize);
+ __ SmiUntag(number);
__ push(number);
__ fild_s(Operand(esp, 0));
__ pop(number);
@@ -7221,14 +7733,14 @@
__ j(equal, &load_float_eax);
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
- __ sar(edx, 1); // Untag smi before converting to float.
+ __ SmiUntag(edx); // Untag smi before converting to float.
__ cvtsi2sd(xmm0, Operand(edx));
- __ shl(edx, 1); // Retag smi for heap number overwriting test.
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
- __ sar(eax, 1); // Untag smi before converting to float.
+ __ SmiUntag(eax); // Untag smi before converting to float.
__ cvtsi2sd(xmm1, Operand(eax));
- __ shl(eax, 1); // Retag smi for heap number overwriting test.
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done);
__ bind(&load_float_eax);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
@@ -7252,14 +7764,14 @@
__ jmp(&done);
__ bind(&load_smi_1);
- __ sar(scratch, kSmiTagSize);
+ __ SmiUntag(scratch);
__ push(scratch);
__ fild_s(Operand(esp, 0));
__ pop(scratch);
__ jmp(&done_load_1);
__ bind(&load_smi_2);
- __ sar(scratch, kSmiTagSize);
+ __ SmiUntag(scratch);
__ push(scratch);
__ fild_s(Operand(esp, 0));
__ pop(scratch);
@@ -7292,86 +7804,142 @@
}
-void UnarySubStub::Generate(MacroAssembler* masm) {
- Label undo;
- Label slow;
- Label done;
- Label try_float;
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
- // Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(overflow, &undo, not_taken);
-
- // If result is a smi we are done.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, taken);
-
- // Restore eax and enter runtime system.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
-
- // Enter runtime system.
- __ bind(&slow);
- __ pop(ecx); // pop return address
- __ push(eax);
- __ push(ecx); // push return address
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-
- // Try floating point case.
- __ bind(&try_float);
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
- } else {
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ Label undo;
__ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(overflow, &undo, not_taken);
+
+ // If result is a smi we are done.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done, taken);
+
+ // Restore eax and go slow case.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+ __ jmp(&slow);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
+ if (overwrite_) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow, not_taken);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(ecx);
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float, not_taken);
+
+ // Tag the result as a smi and we're done.
+ ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
+ __ jmp(&done);
+
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (!overwrite_) {
+ // Allocate a fresh heap number, but don't overwrite eax until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in eax.
+ __ AllocateHeapNumber(ebx, edx, edi, &slow);
+ __ mov(eax, Operand(ebx));
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
+ } else {
+ UNIMPLEMENTED();
}
+ // Return from the stub.
__ bind(&done);
-
__ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(ecx); // pop return address.
+ __ push(eax);
+ __ push(ecx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
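
The "cmp(ecx, 0xc0000000); j(sign, ...)" pair in the BIT_NOT path is a
compact range check: subtracting 0xc0000000 leaves the sign bit clear
exactly when the 32-bit result fits the 31-bit smi range [-2^30, 2^30).
A self-contained check of that identity (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Mirrors the stub's smi-range test on the result of the bit op.
    bool FitsInSmi(int32_t value) {
      uint32_t diff = static_cast<uint32_t>(value) - 0xc0000000u;
      return (diff & 0x80000000u) == 0;  // sign flag clear -> smi
    }

    int main() {
      assert(FitsInSmi(0));
      assert(FitsInSmi((1 << 30) - 1));    // largest smi
      assert(FitsInSmi(-(1 << 30)));       // smallest smi
      assert(!FitsInSmi(1 << 30));         // needs a heap number
      assert(!FitsInSmi(-(1 << 30) - 1));  // needs a heap number
      return 0;
    }
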
void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor);
-
- // Nothing to do: The formal number of parameters has already been
- // passed in register eax by calling function. Just return it.
- __ ret(0);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame and return it.
- __ bind(&adaptor);
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Otherwise nothing to do: The number of formal parameters has already been
+ // passed in register eax by the calling function. Just return it.
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmov(equal, eax,
+ Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ } else {
+ Label exit;
+ __ j(not_equal, &exit);
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ bind(&exit);
+ }
__ ret(0);
}
@@ -7441,24 +8009,368 @@
static const int kDisplacement = 2 * kPointerSize;
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
+ Label adaptor_frame, try_allocate, runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime);
+ __ j(equal, &adaptor_frame);
+
+ // Get the length from the frame.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
+ __ bind(&adaptor_frame);
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(Operand(esp, 1 * kPointerSize), ecx);
__ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
__ mov(Operand(esp, 2 * kPointerSize), edx);
+ // Try the new space allocation. Start out with computing the size of
+ // the arguments object and the elements array.
+ Label add_arguments_object;
+ __ bind(&try_allocate);
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &add_arguments_object);
+ __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+ __ bind(&add_arguments_object);
+ __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of both objects in one go.
+ __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+ int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
+ __ mov(edi, Operand(edi, offset));
+
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(edi, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+
+ // Setup the callee in-object property.
+ ASSERT(Heap::arguments_callee_index == 0);
+ __ mov(ebx, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
+
+ // Get the length (smi tagged) and set that as an in-object property too.
+ ASSERT(Heap::arguments_length_index == 1);
+ __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
+
+ // If there are no actual arguments, we're done.
+ Label done;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &done);
+
+ // Get the parameters pointer from the stack and untag the length.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ SmiUntag(ecx);
+
+ // Setup the elements pointer in the allocated arguments object and
+ // initialize the header in the elements fixed array.
+ __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(Factory::fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ // Copy the fixed array slots.
+ Label loop;
+ __ bind(&loop);
+ __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
+ __ add(Operand(edi), Immediate(kPointerSize));
+ __ sub(Operand(edx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ test(ecx, Operand(ecx));
+ __ j(not_zero, &loop);
+
+ // Return and remove the on-stack parameters.
+ __ bind(&done);
+ __ ret(3 * kPointerSize);
+
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
}
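
The try_allocate block sizes a single new-space allocation that holds both
the arguments object and, when the length is non-zero, its elements
FixedArray. Note the smi trick in the lea: ecx still holds the tagged length
(length * 2), so scaling by times_2 yields length * 4 bytes, i.e. one
pointer-sized slot per argument. The arithmetic, with illustrative constants
(the real sizes live in V8's heap headers and are assumptions here):

    #include <cassert>

    const int kPointerSize = 4;                          // ia32
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
    const int kArgumentsObjectSize = 5 * kPointerSize;   // assumed size

    // Bytes requested from AllocateInNewSpace for a given argument count.
    int AllocationSize(int length) {
      int size = kArgumentsObjectSize;
      if (length != 0) size += kFixedArrayHeaderSize + length * kPointerSize;
      return size;
    }

    int main() {
      assert(AllocationSize(0) == kArgumentsObjectSize);
      assert(AllocationSize(2) ==
             kArgumentsObjectSize + kFixedArrayHeaderSize + 2 * kPointerSize);
      return 0;
    }
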
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if regexp entry in generated code is turned
+ // off.
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: last_match_info (expected JSArray)
+ // esp[8]: previous index
+ // esp[12]: subject string
+ // esp[16]: JSRegExp object
+
+ Label runtime;
+
+ // Check that the first argument is a JSRegExp object.
+ __ mov(eax, Operand(esp, 4 * kPointerSize));
+ ASSERT_EQ(0, kSmiTag);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+#ifdef DEBUG
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+#endif
+
+ // ecx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the number of captures fit in the static offsets vector buffer.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+ // uses the assumption that smis are 2 * their untagged value.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ // Check that the static offsets vector buffer is large enough.
+ __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ j(above, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the second argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+ // Get the length of the string to ebx.
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+
+ // ebx: Length of subject string
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the third argument is a positive smi.
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask | 0x80000000));
+ __ j(not_zero, &runtime);
+ // Check that it is not greater than the subject string length.
+ __ SmiUntag(eax);
+ __ cmp(eax, Operand(ebx));
+ __ j(greater, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, Operand(eax));
+ __ j(greater, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string (only flat
+ // ascii strings are supported).
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(eax, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime, not_taken);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the irregexp code has been generated for an ascii string. If
+ // it has, the field contains a code object; otherwise it contains the hole.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ CmpObjectType(edx, CODE_TYPE, ebx);
+ __ j(not_equal, &runtime);
+
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ mov(eax, Operand(esp, 3 * kPointerSize)); // Subject string.
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // Previous index.
+ __ mov(ecx, Operand(esp, 4 * kPointerSize)); // JSRegExp object.
+ __ SmiUntag(ebx); // Previous index from smi.
+
+ // eax: subject string
+ // ebx: previous index
+ // edx: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ push(Immediate(1));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+ __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ push(ecx);
+
+ // Argument 6: At start of string?
+ __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
+ __ test(ebx, Operand(ebx));
+ __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
+ __ push(ecx);
+
+ // Argument 5: static offsets vector buffer.
+ __ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
+
+ // Argument 4: End of string data.
+ __ mov(ecx, FieldOperand(eax, String::kLengthOffset));
+ __ add(ecx, Operand(eax));
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ push(ecx);
+
+ // Argument 3: Start of string data.
+ __ mov(ecx, ebx);
+ __ add(ebx, Operand(eax)); // String is ASCII.
+ __ add(Operand(ebx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ push(ebx);
+
+ // Argument 2: Previous index.
+ __ push(ecx);
+
+ // Argument 1: Subject string.
+ __ push(eax);
+
+ // Locate the code entry and call it.
+ __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(Operand(edx));
+ // Remove arguments.
+ __ add(Operand(esp), Immediate(8 * kPointerSize));
+
+ // Check the result.
+ Label success;
+ __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+ __ j(equal, &success, taken);
+ Label failure;
+ __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+ __ j(equal, &failure, taken);
+ __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+ // If not an exception, it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // Result must now be exception. If there is no pending exception already, a
+ // stack overflow (on the backtrack stack) was detected in RegExp code, but
+ // the exception has not been created yet. Handle that in the runtime system.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ cmp(eax, Operand::StaticVariable(pending_exception));
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(Operand(eax), Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
+
+ // edx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // edx: number of capture registers
+ // Store the capture count.
+ __ SmiTag(edx); // Number of capture registers to smi.
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ SmiUntag(edx); // Number of capture registers back from smi.
+ // Store last subject and last input.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector();
+ __ mov(ecx, Immediate(address_of_static_offsets_vector));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // ecx: offsets vector
+ // edx: number of capture registers
+ Label next_capture, done;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // Read previous index.
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ sub(Operand(edx), Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer.
+ __ mov(edi, Operand(ecx, edx, times_pointer_size, 0));
+ // Perform explicit shift
+ ASSERT_EQ(0, kSmiTag);
+ __ shl(edi, kSmiTagSize);
+ // Add previous index (from its stack slot) if value is not negative.
+ Label capture_negative;
+ // Sign flag set by the shift above.
+ __ j(negative, &capture_negative, not_taken);
+ __ add(edi, Operand(eax)); // Add previous index (adding smi to smi).
+ __ bind(&capture_negative);
+ // Store the smi value in the last match info.
+ __ mov(FieldOperand(ebx,
+ edx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ edi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+}
+
+
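
Two details of the capture-register arithmetic above are easy to miss: the
native code records two offsets (start and end) for the whole match plus two
per explicit capture, and because an ia32 smi is exactly twice its payload
the stub gets (captures + 1) * 2 by simply adding 2 to the still-tagged
count. In plain C++ (illustrative sketch):

    #include <cassert>

    // Offsets written by the native RegExp code for a successful match.
    int CaptureRegisterCount(int number_of_captures) {
      int tagged = number_of_captures * 2;  // smi representation
      return tagged + 2;                    // == (number_of_captures + 1) * 2
    }

    int main() {
      assert(CaptureRegisterCount(0) == 2);  // whole match only
      assert(CaptureRegisterCount(3) == 8);  // match + three captures
      return 0;
    }
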
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
@@ -7476,35 +8388,41 @@
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
- Label return_equal;
- Label heap_number;
- // If it's not a heap number, then return equal.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(equal, &heap_number);
- __ bind(&return_equal);
- __ Set(eax, Immediate(0));
- __ ret(0);
+ if (never_nan_nan_) {
+ __ Set(eax, Immediate(0));
+ __ ret(0);
+ } else {
+ Label return_equal;
+ Label heap_number;
+ // If it's not a heap number, then return equal.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(equal, &heap_number);
+ __ bind(&return_equal);
+ __ Set(eax, Immediate(0));
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(eax, Operand(eax));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
- __ add(edx, Operand(edx));
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- __ setcc(above_equal, eax);
- __ ret(0);
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ __ setcc(above_equal, eax);
+ __ ret(0);
+ }
__ bind(&not_identical);
}
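
The NaN test above works on the high 32 bits of the double alone: doubling
the word shifts out the sign bit, after which the value is a quiet NaN
exactly when the 11 exponent bits and mantissa bit 51 (the quiet bit) are
all set. A standalone sketch of the same predicate (the constant below is
the local equivalent of kQuietNaNHighBitsMask << 1, an assumption about its
value):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    bool IsQuietNaN(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      // setcc(above_equal) in the stub: exponent and quiet bit all set.
      return (hi << 1) >= 0xFFF00000u;
    }

    int main() {
      assert(IsQuietNaN(std::nan("")));  // x86 hardware produces quiet NaNs
      assert(!IsQuietNaN(std::numeric_limits<double>::infinity()));
      assert(!IsQuietNaN(1.0));
      return 0;
    }
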
@@ -7651,9 +8569,10 @@
// Fast negative check for symbol-to-symbol equality.
__ bind(&check_for_symbols);
+ Label check_for_strings;
if (cc_ == equal) {
- BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
- BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax already holds a
@@ -7661,6 +8580,44 @@
__ ret(2 * kPointerSize);
}
+ __ bind(&check_for_strings);
+
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(ebx, Operand(edx));
+ __ and_(ebx, Operand(eax));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &call_builtin);
+
+ // Load instance type for both objects.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ Label non_ascii_flat;
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiString =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ and_(ecx, kFlatAsciiString);
+ __ cmp(ecx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &call_builtin);
+ __ and_(ebx, kFlatAsciiString);
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &call_builtin);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
__ bind(&call_builtin);
// must swap argument order
__ pop(ecx);
@@ -7725,6 +8682,33 @@
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
+ // If the receiver might be a value (string, number or boolean), check for
+ // this and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &receiver_is_value, not_taken);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
// Get the function to call from the stack.
// +2 ~ receiver, return address
__ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
@@ -8229,10 +9213,55 @@
}
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ switch (cc_) {
+ case less: return "CompareStub_LT";
+ case greater: return "CompareStub_GT";
+ case less_equal: return "CompareStub_LE";
+ case greater_equal: return "CompareStub_GE";
+ case not_equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_NE_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_NO_NAN";
+ } else {
+ return "CompareStub_NE";
+ }
+ }
+ }
+ case equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_EQ_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_NO_NAN";
+ } else {
+ return "CompareStub_EQ";
+ }
+ }
+ }
+ default: return "CompareStub";
+ }
+}
+
+
int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
- return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+ // Encode the three parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ int nnn_value = (never_nan_nan_ ? 2 : 0);
+ if (cc_ != equal) nnn_value = 0; // Avoid duplicate stubs.
+ return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
}
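
After this change the minor key packs three parameters instead of two:
bit 0 is strict_, bit 1 is never_nan_nan_ (zeroed for non-equality
conditions so otherwise-identical stubs share code), and the condition code
occupies the bits above. A quick model of the packing (the condition values
are illustrative, not V8's actual encoding):

    #include <cassert>

    int MinorKeySketch(unsigned cc, bool strict, bool never_nan_nan) {
      bool is_equal = (cc == 4);  // assumed encoding of "equal"
      int nnn_value = (never_nan_nan && is_equal) ? 2 : 0;  // avoid dup stubs
      return static_cast<int>(cc << 2) | nnn_value | (strict ? 1 : 0);
    }

    int main() {
      // Distinct keys for distinct variants of the equality stub.
      assert(MinorKeySketch(4, true, true) != MinorKeySketch(4, true, false));
      // Non-equality conditions ignore the NaN bit entirely.
      assert(MinorKeySketch(5, false, true) == MinorKeySketch(5, false, false));
      return 0;
    }
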
@@ -8306,6 +9335,7 @@
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, Operand(edi));
+ ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
// Allocate an ascii cons string.
@@ -8348,7 +9378,7 @@
Label non_ascii_string_add_flat_result;
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- ASSERT(kAsciiStringTag != 0);
+ ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii_string_add_flat_result);
__ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
@@ -8427,12 +9457,12 @@
}
-void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@@ -8453,6 +9483,316 @@
}
+void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ // Copy characters using rep movs of doublewords. Align destination on 4 byte
+ // boundary before starting rep movs. Copy remaining characters after running
+ // rep movs.
+ ASSERT(dest.is(edi)); // rep movs destination
+ ASSERT(src.is(esi)); // rep movs source
+ ASSERT(count.is(ecx)); // rep movs count
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ __ shl(count, 1);
+ }
+
+ // Don't enter the rep movs if there are less than 4 bytes to copy.
+ Label last_bytes;
+ __ test(count, Immediate(~3));
+ __ j(zero, &last_bytes);
+
+ // Copy from esi to edi using the rep movs instruction.
+ __ mov(scratch, count);
+ __ sar(count, 2); // Number of doublewords to copy.
+ __ rep_movs();
+
+ // Find number of bytes left.
+ __ mov(count, scratch);
+ __ and_(count, 3);
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
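
GenerateCopyCharactersREP trades the byte loop for "rep movs" on 4-byte
doublewords, then mops up the 0-3 trailing bytes one at a time (for two-byte
strings it first doubles the count). The strategy in plain C++ (sketch only;
overlapping regions are not supported, matching the stub's contract):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    void CopyBytesSketch(uint8_t* dest, const uint8_t* src, size_t count) {
      size_t dwords = count >> 2;  // sar(count, 2)
      for (size_t i = 0; i < dwords; i++) {
        std::memcpy(dest + i * 4, src + i * 4, 4);  // one rep movsd step
      }
      for (size_t i = dwords * 4; i < count; i++) {  // count & 3 leftovers
        dest[i] = src[i];
      }
    }

    int main() {
      const uint8_t src[7] = {1, 2, 3, 4, 5, 6, 7};
      uint8_t dest[7] = {0};
      CopyBytesSketch(dest, src, 7);  // one doubleword, then 3 single bytes
      assert(std::memcmp(dest, src, 7) == 0);
      return 0;
    }
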
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: to
+ // esp[8]: from
+ // esp[12]: string
+
+ // Make sure first argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ ASSERT_EQ(0, kSmiTag);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // eax: string
+ // ebx: instance type
+ // Calculate length of sub string using the smi values.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // to
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // from
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ sub(ecx, Operand(edx));
+ // Handle sub-strings of length 2 and less in the runtime system.
+ __ SmiUntag(ecx); // Result length is no longer smi.
+ __ cmp(ecx, 2);
+ __ j(below_equal, &runtime);
+
+ // eax: string
+ // ebx: instance type
+ // ecx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ SmiUntag(ebx);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&non_ascii_flat);
+ // eax: string
+ // ebx: instance type & (kStringRepresentationMask | kStringEncodingMask)
+ // ecx: result string length
+ // Check for flat two byte string
+ __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ // As from is a smi it is 2 times the value, which matches the size of a
+ // two-byte character.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
+
+
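
The two-byte path above adds the still-tagged "from" index directly to the
string base: a smi holds index * 2, which is already the byte offset of
character "index" in a sequence of 2-byte (UC16) characters, so no SmiUntag
is needed. Demonstrated in isolation (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      const char16_t chars[] = {u'a', u'b', u'c', u'd'};
      int from = 2;
      int32_t from_smi = from * 2;  // smi-tag the index
      const uint8_t* base = reinterpret_cast<const uint8_t*>(chars);
      const char16_t* start =
          reinterpret_cast<const char16_t*>(base + from_smi);
      assert(*start == u'c');  // character at index 2
      return 0;
    }
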
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Label compare_lengths, compare_lengths_1;
+
+ // Find minimum length. If either length is zero just compare lengths.
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ test(scratch1, Operand(scratch1));
+ __ j(zero, &compare_lengths_1);
+ __ mov(scratch2, FieldOperand(right, String::kLengthOffset));
+ __ test(scratch2, Operand(scratch2));
+ __ j(zero, &compare_lengths_1);
+ __ cmp(scratch1, Operand(scratch2));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmov(greater, scratch1, Operand(scratch2));
+ } else {
+ Label l;
+ __ j(less, &l);
+ __ mov(scratch1, scratch2);
+ __ bind(&l);
+ }
+
+ Label result_greater, result_less;
+ Label loop;
+ // Compare next character.
+ __ mov(scratch3, Immediate(-1)); // Index into strings.
+ __ bind(&loop);
+ // Compare characters.
+ Label character_compare_done;
+ __ add(Operand(scratch3), Immediate(1));
+ __ mov_b(scratch2, Operand(left,
+ scratch3,
+ times_1,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ subb(scratch2, Operand(right,
+ scratch3,
+ times_1,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ j(not_equal, &character_compare_done);
+ __ sub(Operand(scratch1), Immediate(1));
+ __ j(not_zero, &loop);
+ // If the first min-length characters match, compare lengths; otherwise the
+ // last character compare is the result.
+ __ bind(&character_compare_done);
+ __ j(equal, &compare_lengths);
+ __ j(less, &result_less);
+ __ jmp(&result_greater);
+
+ // Compare lengths.
+ Label result_not_equal;
+ __ bind(&compare_lengths);
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ bind(&compare_lengths_1);
+ __ sub(scratch1, FieldOperand(right, String::kLengthOffset));
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ ASSERT_EQ(0, EQUAL);
+ ASSERT_EQ(0, kSmiTag);
+ __ xor_(eax, Operand(eax));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&result_not_equal);
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ bind(&result_less);
+ __ mov(eax, Immediate(Smi::FromInt(LESS)->value()));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ mov(eax, Immediate(Smi::FromInt(GREATER)->value()));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+}
+
+
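
GenerateCompareFlatAsciiStrings implements ordinary lexicographic order:
walk the common prefix, let the first differing character decide, and fall
back to comparing lengths when one string is a prefix of the other. The
algorithm in plain C++ (sketch; returns -1/0/1 for LESS/EQUAL/GREATER):

    #include <cassert>

    int CompareFlatAsciiSketch(const char* left, int left_len,
                               const char* right, int right_len) {
      int min_len = left_len < right_len ? left_len : right_len;
      for (int i = 0; i < min_len; i++) {
        // Safe as signed compare: ascii characters are at most 127.
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      if (left_len == right_len) return 0;  // EQUAL
      return left_len < right_len ? -1 : 1;
    }

    int main() {
      assert(CompareFlatAsciiSketch("abc", 3, "abd", 3) == -1);
      assert(CompareFlatAsciiSketch("abc", 3, "ab", 2) == 1);
      assert(CompareFlatAsciiSketch("abc", 3, "abc", 3) == 0);
      return 0;
    }
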
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &not_same);
+ ASSERT_EQ(0, EQUAL);
+ ASSERT_EQ(0, kSmiTag);
+ __ xor_(eax, Operand(eax));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(ebx, Operand(edx));
+ __ and_(ebx, Operand(eax));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+
+ // Load instance type for both strings.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ Label non_ascii_flat;
+ __ and_(ecx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ecx, kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
+ const int kFlatAsciiString =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ and_(ebx, kFlatAsciiString);
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
+
+ // Compare flat ascii strings.
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ __ bind(&non_ascii_flat);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
#undef __
} } // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 11a5163..56cf978 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -266,7 +266,7 @@
// -------------------------------------------------------------------------
-// Arguments allocation mode
+// Arguments allocation mode.
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
@@ -434,7 +434,7 @@
void GenericBinaryOperation(
Token::Value op,
- SmiAnalysis* type,
+ StaticType* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
@@ -447,7 +447,7 @@
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
- SmiAnalysis* type,
+ StaticType* type,
bool reversed,
OverwriteMode overwrite_mode);
@@ -459,7 +459,8 @@
Result* right,
OverwriteMode overwrite_mode);
- void Comparison(Condition cc,
+ void Comparison(AstNode* node,
+ Condition cc,
bool strict,
ControlDestination* destination);
@@ -474,7 +475,9 @@
void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
void PushUnsafeSmi(Handle<Object> value);
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
// Use an optimized version of Function.prototype.apply that avoid
// allocating the arguments object and just copies the arguments
@@ -540,15 +543,18 @@
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
- // Fast support for Math.sin and Math.cos.
- enum MathOp { SIN, COS };
- void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
- inline void GenerateMathSin(ZoneList<Expression*>* args);
- inline void GenerateMathCos(ZoneList<Expression*>* args);
-
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
+ // Fast support for SubString.
+ void GenerateSubString(ZoneList<Expression*>* args);
+
+ // Fast support for StringCompare.
+ void GenerateStringCompare(ZoneList<Expression*>* args);
+
+ // Support for direct calls from JavaScript to native RegExp code.
+ void GenerateRegExpExec(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -616,39 +622,6 @@
};
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
@@ -665,7 +638,8 @@
mode_(mode),
flags_(flags),
args_in_registers_(false),
- args_reversed_(false) {
+ args_reversed_(false),
+ name_(NULL) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -684,6 +658,7 @@
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
+ char* name_;
const char* GetName();
@@ -725,8 +700,8 @@
bool ArgsInRegistersSupported() {
return ((op_ == Token::ADD) || (op_ == Token::SUB)
- || (op_ == Token::MUL) || (op_ == Token::DIV))
- && flags_ != NO_SMI_CODE_IN_STUB;
+ || (op_ == Token::MUL) || (op_ == Token::DIV))
+ && flags_ != NO_SMI_CODE_IN_STUB;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -747,7 +722,32 @@
};
-class StringAddStub: public CodeStub {
+class StringStubBase: public CodeStub {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies ecx characters from esi to edi. Copying of overlapping regions is
+ // not supported.
+ void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be edi.
+ Register src, // Must be esi.
+ Register count, // Must be ecx.
+ Register scratch, // Neither of the above.
+ bool ascii);
+};
+
+
+class StringAddStub: public StringStubBase {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -759,18 +759,45 @@
void Generate(MacroAssembler* masm);
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register desc,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
// Should the stub check whether arguments are strings?
bool string_check_;
};
+class SubStringStub: public StringStubBase {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public StringStubBase {
+ public:
+ explicit StringCompareStub() {
+ }
+
+ // Compares two flat ascii strings and returns the result in eax after
+ // popping the two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index df5a28a..1fbaa3c 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -61,7 +61,9 @@
{0x0B, "or", REG_OPER_OP_ORDER},
{0x1B, "sbb", REG_OPER_OP_ORDER},
{0x29, "sub", OPER_REG_OP_ORDER},
+ {0x2A, "subb", REG_OPER_OP_ORDER},
{0x2B, "sub", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
{0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
{0x33, "xor", REG_OPER_OP_ORDER},
@@ -1007,7 +1009,16 @@
case 0x80:
{ data++;
- AppendToBuffer("%s ", "cmpb");
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case 5: mnem = "subb"; break;
+ case 7: mnem = "cmpb"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
data += PrintRightOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
@@ -1049,6 +1060,27 @@
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x57) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("xorpd %s,%s",
+ NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x6F) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (*data == 0x7F) {
+ AppendToBuffer("movdqa ");
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
UnimplementedInstruction();
}
@@ -1085,6 +1117,11 @@
data += 2;
break;
+ case 0x2C:
+ AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+ data += 2;
+ break;
+
case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5;
@@ -1155,9 +1192,29 @@
break;
case 0xF3:
- if (*(data+1) == 0x0F && *(data+2) == 0x2C) {
- data += 3;
- data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ if (*(data+1) == 0x0F) {
+ if (*(data+2) == 0x2C) {
+ data += 3;
+ data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ } else if (*(data+2) == 0x6F) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (*(data+2) == 0x7F) {
+ AppendToBuffer("movdqu ");
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*(data+1) == 0xA5) {
+ data += 2;
+ AppendToBuffer("rep_movs");
} else {
UnimplementedInstruction();
}
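For review convenience, the byte sequences this extended 0xF3 case decodes,
per the Intel manual:

    //   F3 0F 2C /r   cvttss2si r32, xmm/m32   (pre-existing case)
    //   F3 0F 6F /r   movdqu xmm, xmm/m128
    //   F3 0F 7F /r   movdqu xmm/m128, xmm
    //   F3 A5         rep movs (dword string move)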
@@ -1177,6 +1234,9 @@
}
int instr_len = data - instr;
+ if (instr_len == 0) {
+ printf("%02x", *data);
+ }
ASSERT(instr_len > 0); // Ensure progress.
int outp = 0;
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 807ebd4..f485d9e 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -29,9 +29,9 @@
#include "codegen-inl.h"
#include "compiler.h"
+#include "debug.h"
#include "fast-codegen.h"
#include "parser.h"
-#include "debug.h"
namespace v8 {
namespace internal {
@@ -116,7 +116,7 @@
__ push(Immediate(Smi::FromInt(fun->num_parameters())));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
+ // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ CallStub(&stub);
@@ -127,7 +127,6 @@
Move(dot_arguments_slot, ecx, ebx, edx);
}
-
{ Comment cmnt(masm_, "[ Declarations");
VisitDeclarations(fun->scope()->declarations());
}
@@ -194,219 +193,466 @@
}
-void FastCodeGenerator::Move(Expression::Context context, Register source) {
+void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
+
case Expression::kEffect:
+ // Nothing to do.
break;
+
case Expression::kValue:
- __ push(source);
+ // Move value into place.
+ switch (location_) {
+ case kAccumulator:
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ __ push(reg);
+ break;
+ }
break;
+
case Expression::kTest:
- TestAndBranch(source, true_label_, false_label_);
+ // For simplicity we always test the accumulator register.
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(context);
break;
- case Expression::kValueTest: {
- Label discard;
- __ push(source);
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ push(source);
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
- }
}
}
-template <>
-Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
- Register scratch) {
- switch (source->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(ebp, SlotOffset(source));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(source->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, source->index());
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- // Fall-through.
- default:
- UNREACHABLE();
- return Operand(eax, 0); // Dead code to make the compiler happy.
- }
-}
-
-
-void FastCodeGenerator::Move(Register dst, Slot* source) {
- Operand location = CreateSlotOperand<Operand>(source, dst);
- __ mov(dst, location);
-}
-
-
-void FastCodeGenerator::Move(Expression::Context context,
- Slot* source,
- Register scratch) {
+void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+ // Nothing to do.
break;
case Expression::kValue: {
- Operand location = CreateSlotOperand<Operand>(source, scratch);
- __ push(location);
+ MemOperand slot_operand = EmitSlotSearch(slot, result_register());
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), slot_operand);
+ break;
+ case kStack:
+ // Memory operands can be pushed directly.
+ __ push(slot_operand);
+ break;
+ }
break;
}
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ Move(result_register(), slot);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
case Expression::kTestValue:
- Move(scratch, source);
- Move(context, scratch);
+ Move(result_register(), slot);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
break;
}
}
-void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+ // Nothing to do.
break;
case Expression::kValue:
- __ push(Immediate(expr->handle()));
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), lit->handle());
+ break;
+ case kStack:
+ // Immediates can be pushed directly.
+ __ push(Immediate(lit->handle()));
+ break;
+ }
break;
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ __ mov(result_register(), lit->handle());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
case Expression::kTestValue:
- __ mov(eax, expr->handle());
- Move(context, eax);
+ __ mov(result_register(), lit->handle());
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
break;
}
}
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(1);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ __ pop(result_register());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ __ mov(result_register(), Operand(esp, 0));
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(esp));
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(count);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ // For simplicity we always test the accumulator register.
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ mov(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ mov(result_register(), reg);
+ __ mov(Operand(esp, 0), result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+ break;
+
+ case Expression::kValue: {
+ Label done;
+ switch (location_) {
+ case kAccumulator:
+ __ bind(materialize_true);
+ __ mov(result_register(), Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ mov(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ bind(materialize_true);
+ __ push(Immediate(Factory::true_value()));
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ push(Immediate(Factory::false_value()));
+ break;
+ }
+ __ bind(&done);
+ break;
+ }
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(materialize_true);
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::true_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::true_value()));
+ break;
+ }
+ __ jmp(true_label_);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(materialize_false);
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::false_value()));
+ break;
+ }
+ __ jmp(false_label_);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DoTest(Expression::Context context) {
+ // The value to test is in the accumulator. If the value might be needed
+ // on the stack (value/test and test/value contexts with a stack location
+ // desired), then the value is already duplicated on the stack.
+ ASSERT_NE(NULL, true_label_);
+ ASSERT_NE(NULL, false_label_);
+
+ // In value/test and test/value expression contexts with stack as the
+ // desired location, there is already an extra value on the stack. Use a
+ // label to discard it if unneeded.
+ Label discard;
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_false = &discard;
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_true = &discard;
+ break;
+ }
+ break;
+ }
+
+ // Emit the inlined tests assumed by the stub.
+ __ cmp(result_register(), Factory::undefined_value());
+ __ j(equal, if_false);
+ __ cmp(result_register(), Factory::true_value());
+ __ j(equal, if_true);
+ __ cmp(result_register(), Factory::false_value());
+ __ j(equal, if_false);
+ ASSERT_EQ(0, kSmiTag);
+ __ test(result_register(), Operand(result_register()));
+ __ j(zero, if_false);
+ __ test(result_register(), Immediate(kSmiTagMask));
+ __ j(zero, if_true);
+
+ // Save a copy of the value if it may be needed and isn't already saved.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ }
+
+ // Call the ToBoolean stub for all other cases.
+ ToBooleanStub stub;
+ __ push(result_register());
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+
+ // The stub returns nonzero for true. Complete based on the context.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+
+ case Expression::kTest:
+ __ j(not_zero, true_label_);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ j(zero, &discard);
+ __ pop(result_register());
+ __ jmp(true_label_);
+ break;
+ case kStack:
+ __ j(not_zero, true_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ j(not_zero, &discard);
+ __ pop(result_register());
+ __ jmp(false_label_);
+ break;
+ case kStack:
+ __ j(zero, false_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ break;
+ }
+}
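A C++ paraphrase of DoTest's inlined checks, as a reading aid. This is a
sketch under the usual ia32 tagging assumptions (kSmiTag is 0, so a word with
a clear low bit is a smi); the three *_obj parameters stand in for the
canonical heap values compared against above:

    #include <cstdint>
    enum FastResult { kFalse, kTrue, kNeedStub };
    FastResult FastToBoolean(uintptr_t value, uintptr_t undefined_obj,
                             uintptr_t true_obj, uintptr_t false_obj) {
      if (value == undefined_obj) return kFalse;
      if (value == true_obj) return kTrue;
      if (value == false_obj) return kFalse;
      if (value == 0) return kFalse;       // the smi zero is false
      if ((value & 1) == 0) return kTrue;  // all other smis are true
      return kNeedStub;                    // other heap objects: ToBooleanStub
    }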
+
+
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(ebp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return Operand(eax, 0);
+}
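In outline, EmitSlotSearch reduces the three slot kinds to one machine
operand:

    //   PARAMETER / LOCAL -> Operand(ebp, SlotOffset(slot)), a frame slot.
    //   CONTEXT           -> a field of a heap-allocated Context object,
    //                        reached by walking ContextChainLength links
    //                        (LoadContext leaves the context in scratch).
    //   LOOKUP            -> needs a runtime lookup and never reaches here.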
+
+
+void FastCodeGenerator::Move(Register destination, Slot* source) {
+ MemOperand location = EmitSlotSearch(source, destination);
+ __ mov(destination, location);
+}
+
+
void FastCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
- switch (dst->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- __ mov(Operand(ebp, SlotOffset(dst)), src);
- break;
- case Slot::CONTEXT: {
- ASSERT(!src.is(scratch1));
- ASSERT(!src.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- int context_chain_length =
- function_->scope()->ContextChainLength(dst->var()->scope());
- __ LoadContext(scratch1, context_chain_length);
- __ mov(Operand(scratch1, Context::SlotOffset(dst->index())), src);
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ mov(location, src);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
}
}
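The barrier applies only to CONTEXT slots because frame slots are scanned as
part of the stack anyway, while a context is a heap object whose mutated
fields the collector must learn about. A rough model of the check RecordWrite
performs; the helper names are illustrative, not v8 API:

    #include <cstdint>
    bool InNewSpace(uintptr_t addr);           // hypothetical helpers,
    void RememberedSetInsert(uintptr_t slot);  // not v8's actual interface
    void RecordWriteModel(uintptr_t object, int offset, uintptr_t value) {
      if (InNewSpace(value) && !InNewSpace(object)) {  // old->new pointer store
        RememberedSetInsert(object + offset);          // GC rescans this slot
      }
    }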
-void FastCodeGenerator::DropAndMove(Expression::Context context,
- Register source,
- int count) {
- ASSERT(count > 0);
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ add(Operand(esp), Immediate(count * kPointerSize));
- break;
- case Expression::kValue:
- if (count > 1) {
- __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
- }
- __ mov(Operand(esp, 0), source);
- break;
- case Expression::kTest:
- ASSERT(!source.is(esp));
- __ add(Operand(esp), Immediate(count * kPointerSize));
- TestAndBranch(source, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (count > 1) {
- __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
- }
- __ mov(Operand(esp, 0), source);
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (count > 1) {
- __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
- }
- __ mov(Operand(esp, 0), source);
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::TestAndBranch(Register source,
- Label* true_label,
- Label* false_label) {
- ASSERT_NE(NULL, true_label);
- ASSERT_NE(NULL, false_label);
- // Use the shared ToBoolean stub to compile the value in the register into
- // control flow to the code generator's true and false labels. Perform
- // the fast checks assumed by the stub.
- __ cmp(source, Factory::undefined_value()); // The undefined value is false.
- __ j(equal, false_label);
- __ cmp(source, Factory::true_value()); // True is true.
- __ j(equal, true_label);
- __ cmp(source, Factory::false_value()); // False is false.
- __ j(equal, false_label);
- ASSERT_EQ(0, kSmiTag);
- __ test(source, Operand(source)); // The smi zero is false.
- __ j(zero, false_label);
- __ test(source, Immediate(kSmiTagMask)); // All other smis are true.
- __ j(zero, true_label);
-
- // Call the stub for all other cases.
- __ push(source);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ test(eax, Operand(eax)); // The stub returns nonzero for true.
- __ j(not_zero, true_label);
- __ jmp(false_label);
-}
-
-
void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
@@ -416,18 +662,21 @@
if (slot != NULL) {
switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
+ case Slot::PARAMETER:
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
- __ mov(Operand(ebp, SlotOffset(var->slot())),
+ __ mov(Operand(ebp, SlotOffset(slot)),
Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(Operand(ebp, SlotOffset(var->slot())));
+ VisitForValue(decl->fun(), kAccumulator);
+ __ mov(Operand(ebp, SlotOffset(slot)), result_register());
}
break;
case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
@@ -442,11 +691,11 @@
__ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
// No write barrier since the hole value is in old space.
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(eax);
- __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+ VisitForValue(decl->fun(), kAccumulator);
+ __ mov(CodeGenerator::ContextOperand(esi, slot->index()),
+ result_register());
int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(esi, offset, eax, ecx);
+ __ RecordWrite(esi, offset, result_register(), ecx);
}
break;
@@ -466,7 +715,7 @@
if (decl->mode() == Variable::CONST) {
__ push(Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
+ VisitForValue(decl->fun(), kStack);
} else {
__ push(Immediate(Smi::FromInt(0))); // No initial value!
}
@@ -479,27 +728,24 @@
if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
- Visit(decl->fun());
- __ pop(eax);
+ VisitForValue(decl->fun(), kAccumulator);
} else {
- __ Set(eax, Immediate(Factory::the_hole_value()));
+ __ mov(result_register(), Factory::the_hole_value());
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// Absence of a test eax instruction following the call
// indicates that no part of the load was inlined.
+ __ nop();
// Value in eax is ignored (declarations are statements). Receiver
// and key on stack are discarded.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ Drop(2);
}
}
}
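The __ nop() after each IC call, a pattern used throughout this patch, exists
because the IC machinery inspects the instruction that follows its call site
and takes a test eax there as the marker of an inlined fast path. In outline:

    //   call KeyedStoreIC_Initialize   ; IC call site
    //   nop                            ; anything but 'test eax' means
    //                                  ; "no inlined code to patch here"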
@@ -515,20 +761,6 @@
}
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- Expression* expr = stmt->expression();
- if (expr->AsLiteral() != NULL) {
- __ mov(eax, expr->AsLiteral()->handle());
- } else {
- ASSERT_EQ(Expression::kValue, expr->context());
- Visit(expr);
- __ pop(eax);
- }
- EmitReturnSequence(stmt->statement_pos());
-}
-
-
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -543,20 +775,26 @@
__ push(esi);
__ push(Immediate(boilerplate));
__ CallRuntime(Runtime::kNewClosure, 2);
- Move(expr->context(), eax);
+ Apply(context_, eax);
}
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- Expression* rewrite = expr->var()->rewrite();
+ EmitVariableLoad(expr->var(), context_);
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ Expression* rewrite = var->rewrite();
if (rewrite == NULL) {
- ASSERT(expr->var()->is_global());
+ ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object on the stack.
__ push(CodeGenerator::GlobalObject());
- __ mov(ecx, expr->name());
+ __ mov(ecx, var->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// By emitting a nop we make sure that we do not have a test eax
@@ -564,14 +802,13 @@
// Remember that the assembler may choose to do peephole optimization
// (eg, push/pop elimination).
__ nop();
-
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, context, eax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
Comment cmnt(masm_, "Stack slot");
break;
}
@@ -582,47 +819,45 @@
case Slot::LOOKUP:
UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
- Move(expr->context(), slot, eax);
+ Apply(context, slot);
} else {
- Comment cmnt(masm_, "Variable rewritten to Property");
- // A variable has been rewritten into an explicit access to
- // an object property.
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Currently the only parameter expressions that can occur are
- // on the form "slot[literal]".
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
- // Check that the object is in a slot.
+ // Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object_var);
Slot* object_slot = object_var->slot();
ASSERT_NOT_NULL(object_slot);
// Load the object.
- Move(Expression::kValue, object_slot, eax);
+ MemOperand object_loc = EmitSlotSearch(object_slot, eax);
+ __ push(object_loc);
- // Check that the key is a smi.
+ // Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- Move(Expression::kValue, key_literal);
+ __ push(Immediate(key_literal->handle()));
- // Do a KEYED property load.
+ // Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test eax, ..." instruction after
- // the call. It is treated specially by the LoadIC code.
+ // Notice: We must not have a "test eax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
__ nop();
-
- // Drop key and object left on the stack by IC, and push the result.
- DropAndMove(expr->context(), eax, 2);
+ // Drop key and object left on the stack by IC.
+ DropAndApply(2, context, eax);
}
}
@@ -650,46 +885,24 @@
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
// Label done:
__ bind(&done);
- Move(expr->context(), eax);
+ Apply(context_, eax);
}
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Label exists;
- // Registers will be used as follows:
- // edi = JS function.
- // ebx = literals array.
- // eax = boilerplate
-
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ mov(eax, FieldOperand(ebx, literal_offset));
- __ cmp(eax, Factory::undefined_value());
- __ j(not_equal, &exists);
- // Create boilerplate if it does not exist.
- // Literal array (0).
- __ push(ebx);
- // Literal index (1).
+ __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
- // Constant properties (2).
__ push(Immediate(expr->constant_properties()));
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ bind(&exists);
- // eax contains boilerplate.
- // Clone boilerplate.
- __ push(eax);
- if (expr->depth() == 1) {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- // If result_saved == true: The result is saved on top of the
- // stack and in eax.
- // If result_saved == false: The result not on the stack, just in eax.
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in eax.
bool result_saved = false;
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -703,108 +916,58 @@
result_saved = true;
}
switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ // Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ pop(eax);
+ VisitForValue(value, kAccumulator);
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
// StoreIC leaves the receiver on the stack.
- __ mov(eax, Operand(esp, 0)); // Restore result into eax.
break;
}
- // fall through
+ // Fall through.
case ObjectLiteral::Property::PROTOTYPE:
- __ push(eax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
+ VisitForValue(value, kStack);
__ CallRuntime(Runtime::kSetProperty, 3);
- __ mov(eax, Operand(esp, 0)); // Restore result into eax.
break;
- case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(eax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
+ __ push(Operand(esp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
__ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
+ VisitForValue(value, kStack);
__ CallRuntime(Runtime::kDefineAccessor, 4);
- __ mov(eax, Operand(esp, 0)); // Restore result into eax.
break;
default: UNREACHABLE();
}
}
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
- break;
- case Expression::kValue:
- if (!result_saved) __ push(eax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, eax);
}
}
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Label make_clone;
-
- // Fetch the function's literals array.
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
- // Check if the literal's boilerplate has been instantiated.
- int offset =
- FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
- __ mov(eax, FieldOperand(ebx, offset));
- __ cmp(eax, Factory::undefined_value());
- __ j(not_equal, &make_clone);
-
- // Instantiate the boilerplate.
- __ push(ebx);
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->literals()));
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-
- __ bind(&make_clone);
- // Clone the boilerplate.
- __ push(eax);
+ __ push(Immediate(expr->constant_elements()));
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
bool result_saved = false; // Is the result saved to the stack?
@@ -825,156 +988,87 @@
__ push(eax);
result_saved = true;
}
- Visit(subexpr);
- ASSERT_EQ(Expression::kValue, subexpr->context());
+ VisitForValue(subexpr, kAccumulator);
// Store the subexpression value in the array's elements.
- __ pop(eax); // Subexpression value.
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ mov(FieldOperand(ebx, offset), eax);
+ __ mov(FieldOperand(ebx, offset), result_register());
// Update the write barrier for the array store.
- __ RecordWrite(ebx, offset, eax, ecx);
+ __ RecordWrite(ebx, offset, result_register(), ecx);
}
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
- break;
- case Expression::kValue:
- if (!result_saved) __ push(eax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(eax);
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, eax);
}
}
-void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ mov(ecx, Immediate(key->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
+void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
+ __ push(result_register());
+ GenericBinaryOpStub stub(op,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Apply(context, eax);
+}
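EmitBinaryOp fixes the operand convention used by the binary-operation
visitors below: the left operand is already on the stack, the right arrives
in the accumulator and is pushed here, so GenericBinaryOpStub finds both
operands on the stack. The call pattern as it appears in
VisitBinaryOperation:

    //   VisitForValue(expr->left(), kStack);         // left pushed
    //   VisitForValue(expr->right(), kAccumulator);  // right in eax
    //   EmitBinaryOp(expr->op(), context_);          // push eax; call stub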
+
+
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in eax, variable name in
// ecx, and the global object on the stack.
- __ pop(eax);
__ mov(ecx, var->name());
__ push(CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
// Overwrite the receiver on the stack with the result if needed.
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, context, eax);
} else if (var->slot() != NULL) {
Slot* slot = var->slot();
switch (slot->type()) {
case Slot::LOCAL:
- case Slot::PARAMETER: {
- Operand target = Operand(ebp, SlotOffset(var->slot()));
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Perform assignment and discard value.
- __ pop(target);
- break;
- case Expression::kValue:
- // Perform assignment and preserve value.
- __ mov(eax, Operand(esp, 0));
- __ mov(target, eax);
- break;
- case Expression::kTest:
- // Perform assignment and test (and discard) value.
- __ pop(eax);
- __ mov(target, eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- __ mov(target, eax);
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- __ mov(target, eax);
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
- }
+ case Slot::PARAMETER:
+ __ mov(Operand(ebp, SlotOffset(slot)), result_register());
break;
- }
case Slot::CONTEXT: {
- int chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- if (chain_length > 0) {
- // Move up the context chain to the context containing the slot.
- __ mov(eax,
- Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
- for (int i = 1; i < chain_length; i++) {
- __ mov(eax,
- Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
- }
- } else { // Slot is in the current context. Generate optimized code.
- __ mov(eax, esi); // RecordWrite destroys the object register.
- }
- if (FLAG_debug_code) {
- __ cmp(eax,
- Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ Check(equal, "Context Slot chain length wrong.");
- }
- __ pop(ecx);
- __ mov(Operand(eax, Context::SlotOffset(slot->index())), ecx);
+ MemOperand target = EmitSlotSearch(slot, ecx);
+ __ mov(target, result_register());
// RecordWrite may destroy all its register arguments.
- if (expr->context() == Expression::kValue) {
- __ push(ecx);
- } else if (expr->context() != Expression::kEffect) {
- __ mov(edx, ecx);
- }
+ __ mov(edx, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(eax, offset, ecx, ebx);
- if (expr->context() != Expression::kEffect &&
- expr->context() != Expression::kValue) {
- Move(expr->context(), edx);
- }
+ __ RecordWrite(ecx, offset, edx, ebx);
break;
}
@@ -982,6 +1076,12 @@
UNREACHABLE();
break;
}
+ Apply(context, result_register());
+
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
}
}
@@ -996,14 +1096,18 @@
// change to slow case to avoid the quadratic behavior of repeatedly
// adding fast properties.
if (expr->starts_initialization_block()) {
- __ push(Operand(esp, kPointerSize)); // Receiver is under value.
+ __ push(result_register());
+ __ push(Operand(esp, kPointerSize)); // Receiver is now under value.
__ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
}
- __ pop(eax);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
__ mov(ecx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1013,7 +1117,7 @@
__ pop(eax);
}
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, context_, eax);
}
@@ -1024,12 +1128,15 @@
// change to slow case to avoid the quadratic behavior of repeatedly
// adding fast properties.
if (expr->starts_initialization_block()) {
- // Reciever is under the key and value.
+ __ push(result_register());
+ // Receiver is now under the key and value.
__ push(Operand(esp, 2 * kPointerSize));
__ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
}
- __ pop(eax);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// This nop signals to the IC that there is no inlined code at the call
@@ -1039,72 +1146,55 @@
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ push(eax); // Result of assignment, saved even if not needed.
- // Reciever is under the key and value.
+ // Receiver is under the key and value.
__ push(Operand(esp, 2 * kPointerSize));
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
}
// Receiver and key are still on stack.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- Move(expr->context(), eax);
+ DropAndApply(2, context_, eax);
}
void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- uint32_t dummy;
- // Record the source position for the property load.
- SetSourcePosition(expr->position());
+ // Evaluate the receiver.
+ VisitForValue(expr->obj(), kStack);
- // Evaluate receiver.
- Visit(expr->obj());
-
- if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
- !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
- // Do a NAMED property load.
- // The IC expects the property name in ecx and the receiver on the stack.
- __ mov(ecx, Immediate(key->AsLiteral()->handle()));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a test eax
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
+ if (key->IsPropertyName()) {
+ EmitNamedPropertyLoad(expr);
+ // Drop receiver left on the stack by IC.
+ DropAndApply(1, context_, eax);
} else {
- // Do a KEYED property load.
- Visit(expr->key());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test eax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
- // Drop key left on the stack by IC.
- __ add(Operand(esp), Immediate(kPointerSize));
+ VisitForValue(expr->key(), kStack);
+ EmitKeyedPropertyLoad(expr);
+ // Drop key and receiver left on the stack by IC.
+ DropAndApply(2, context_, eax);
}
- DropAndMove(expr->context(), eax);
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> name,
+ RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ VisitForValue(args->at(i), kStack);
}
- // Record source position for debugger.
+ __ Set(ecx, Immediate(name));
+ // Record source position of the IC call.
SetSourcePosition(expr->position());
- // Call the IC initialization code.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ call(ic, reloc_info);
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
+ __ call(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndMove(expr->context(), eax);
+ Apply(context_, eax);
}
@@ -1113,16 +1203,15 @@
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
+ VisitForValue(args->at(i), kStack);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, context_, eax);
}
@@ -1135,11 +1224,9 @@
// Call to the identifier 'eval'.
UNREACHABLE();
} else if (var != NULL && !var->is_this() && var->is_global()) {
- // Call to a global variable.
- __ push(Immediate(var->name()));
- // Push global object as receiver for the call IC lookup.
+ // Push global object as receiver for the call IC.
__ push(CodeGenerator::GlobalObject());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1150,14 +1237,13 @@
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
- __ push(Immediate(key->handle()));
- Visit(prop->obj());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ VisitForValue(prop->obj(), kStack);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
- Visit(prop->obj());
- Visit(prop->key());
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1166,14 +1252,15 @@
// instruction after the call; it is treated specially by the LoadIC code.
__ nop();
// Drop key left on the stack by IC.
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
// Pop receiver.
__ pop(ebx);
// Push result (function).
__ push(eax);
// Push receiver object on stack.
if (prop->is_synthetic()) {
- __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
} else {
__ push(ebx);
}
@@ -1189,7 +1276,7 @@
loop_depth() == 0) {
lit->set_try_fast_codegen(true);
}
- Visit(fun);
+ VisitForValue(fun, kStack);
// Load global receiver object.
__ mov(ebx, CodeGenerator::GlobalObject());
__ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
@@ -1205,8 +1292,7 @@
// expression in new calls must be evaluated before the
// arguments.
// Push function on the stack.
- Visit(expr->expression());
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
+ VisitForValue(expr->expression(), kStack);
// Push global object (receiver).
__ push(CodeGenerator::GlobalObject());
@@ -1215,10 +1301,7 @@
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- // If location is value, it is already on the stack,
- // so nothing to do here.
+ VisitForValue(args->at(i), kStack);
}
// Call the construct call builtin that handles allocation and
@@ -1234,7 +1317,7 @@
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
// Replace function on TOS with result in eax, or pop it.
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, context_, eax);
}
@@ -1244,7 +1327,6 @@
if (expr->is_jsruntime()) {
// Prepare for calling JS runtime function.
- __ push(Immediate(expr->name()));
__ mov(eax, CodeGenerator::GlobalObject());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
}
@@ -1252,24 +1334,22 @@
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ VisitForValue(args->at(i), kStack);
}
if (expr->is_jsruntime()) {
- // Call the JS runtime function.
- Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
+ // Call the JS runtime function via a call IC.
+ __ Set(ecx, Immediate(expr->name()));
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
__ call(ic, RelocInfo::CODE_TARGET);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- // Discard the function left on TOS.
- DropAndMove(expr->context(), eax);
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
- Move(expr->context(), eax);
}
+ Apply(context_, eax);
}
@@ -1277,22 +1357,35 @@
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- Visit(expr->expression());
- ASSERT_EQ(Expression::kEffect, expr->expression()->context());
- switch (expr->context()) {
+ VisitForEffect(expr->expression());
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
case Expression::kEffect:
break;
case Expression::kValue:
- __ push(Immediate(Factory::undefined_value()));
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::undefined_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::undefined_value()));
+ break;
+ }
break;
case Expression::kTestValue:
// Value is false so it's needed.
- __ push(Immediate(Factory::undefined_value()));
+ switch (location_) {
+ case kAccumulator:
+ __ mov(result_register(), Factory::undefined_value());
+ break;
+ case kStack:
+ __ push(Immediate(Factory::undefined_value()));
+ break;
+ }
// Fall through.
- case Expression::kTest: // Fall through.
+ case Expression::kTest:
case Expression::kValueTest:
__ jmp(false_label_);
break;
@@ -1302,70 +1395,39 @@
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- ASSERT_EQ(Expression::kTest, expr->expression()->context());
-
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- switch (expr->context()) {
+ Label materialize_true, materialize_false, done;
+ // Initially assume a pure test context. Notice that the labels are
+ // swapped.
+ Label* if_true = false_label_;
+ Label* if_false = true_label_;
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_false;
- false_label_ = &push_true;
- Visit(expr->expression());
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(&done);
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ bind(&done);
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
- Visit(expr->expression());
- __ bind(&done);
+ if_true = &done;
+ if_false = &done;
break;
-
+ case Expression::kValue:
+ if_true = &materialize_false;
+ if_false = &materialize_true;
+ break;
case Expression::kTest:
- true_label_ = saved_false;
- false_label_ = saved_true;
- Visit(expr->expression());
break;
-
case Expression::kValueTest:
- true_label_ = saved_false;
- false_label_ = &push_true;
- Visit(expr->expression());
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(saved_true);
+ if_false = &materialize_true;
break;
-
case Expression::kTestValue:
- true_label_ = &push_false;
- false_label_ = saved_true;
- Visit(expr->expression());
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ jmp(saved_false);
+ if_true = &materialize_false;
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
+ VisitForControl(expr->expression(), if_true, if_false);
+ Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL &&
!proxy->var()->is_this() &&
@@ -1387,11 +1449,11 @@
__ push(eax);
} else {
// This expression cannot throw a reference error at the top level.
- Visit(expr->expression());
+ VisitForValue(expr->expression(), kStack);
}
__ CallRuntime(Runtime::kTypeof, 1);
- Move(expr->context(), eax);
+ Apply(context_, eax);
break;
}
@@ -1403,71 +1465,130 @@
void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- Visit(proxy);
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In the case of a property, the shape of the key expression is used
+ // to distinguish a named from a keyed property.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ Location saved_location = location_;
+ location_ = kStack;
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ location_ = saved_location;
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && context_ != Expression::kEffect) {
+ __ push(Immediate(Smi::FromInt(0)));
+ }
+ VisitForValue(prop->obj(), kStack);
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForValue(prop->key(), kStack);
+ EmitKeyedPropertyLoad(prop);
+ }
+ __ push(eax);
+ }
+
+ // Convert to number.
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kValue: // Fall through
- case Expression::kTest: // Fall through
- case Expression::kTestValue: // Fall through
- case Expression::kValueTest:
- // Duplicate the result on the stack.
- __ push(eax);
- break;
- case Expression::kEffect:
- // Do not save result.
- break;
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver (and, for a keyed property,
+ // the key) currently on the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ break;
+ }
}
- // Call runtime for +1/-1.
+
+ // Call stub for +1/-1.
__ push(eax);
__ push(Immediate(Smi::FromInt(1)));
- if (expr->op() == Token::INC) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- __ CallRuntime(Runtime::kNumberSub, 2);
- }
- // Call Store IC.
- __ mov(ecx, proxy->AsVariable()->name());
- __ push(CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Restore up stack after store IC.
- __ add(Operand(esp), Immediate(kPointerSize));
+ GenericBinaryOpStub stub(expr->binary_op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through
- case Expression::kValue:
- // Do nothing. Result in either on the stack for value context
- // or discarded for effect context.
+ // Store the value returned in eax.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect: We have the result on
+ // top of the stack.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ context_);
+ }
break;
- case Expression::kTest:
- __ pop(eax);
- TestAndBranch(eax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
+ case NAMED_PROPERTY: {
+ __ mov(ecx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(1, context_, eax);
+ }
break;
}
- case Expression::kTestValue: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(2, context_, eax);
+ }
break;
}
}
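The stack discipline of the postfix-property path above, sketched for
obj.x++ in a value context: a dummy smi is reserved below the receiver so the
old value can later be stored into it, which is what the
mov(Operand(esp, kPointerSize), eax) accomplishes:

    //   push Smi(0)          ; reserve the result slot
    //   push <receiver>      ; VisitForValue(prop->obj(), kStack)
    //   ... named load, TO_NUMBER ...
    //   mov [esp+4], eax     ; old value into the reserved slot
    //   ... add Smi(1) via GenericBinaryOpStub, StoreIC, Drop(1) ...
    //   ApplyTOS(context_)   ; reserved slot now holds the result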
@@ -1478,9 +1599,7 @@
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
- ASSERT_EQ(Expression::kEffect, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
- Visit(expr->left());
+ VisitForEffect(expr->left());
Visit(expr->right());
break;
@@ -1499,20 +1618,12 @@
case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
- case Token::SAR: {
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
-
- Visit(expr->left());
- Visit(expr->right());
- GenericBinaryOpStub stub(expr->op(),
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Move(expr->context(), eax);
-
+ case Token::SAR:
+ VisitForValue(expr->left(), kStack);
+ VisitForValue(expr->right(), kAccumulator);
+ EmitBinaryOp(expr->op(), context_);
break;
- }
+
default:
UNREACHABLE();
}
@@ -1521,64 +1632,57 @@
void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- // Convert current context to test context: Pre-test code.
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- switch (expr->context()) {
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_true;
- false_label_ = &push_false;
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
+ if_true = &done;
+ if_false = &done;
break;
-
+ case Expression::kValue:
+ if_true = &materialize_true;
+ if_false = &materialize_false;
+ break;
case Expression::kTest:
break;
-
case Expression::kValueTest:
- true_label_ = &push_true;
+ if_true = &materialize_true;
break;
-
case Expression::kTestValue:
- false_label_ = &push_false;
+ if_false = &materialize_false;
break;
}
- // Convert current context to test context: End pre-test code.
+ VisitForValue(expr->left(), kStack);
switch (expr->op()) {
- case Token::IN: {
+ case Token::IN:
+ VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ cmp(eax, Factory::true_value());
- __ j(equal, true_label_);
- __ jmp(false_label_);
+ __ j(equal, if_true);
+ __ jmp(if_false);
break;
- }
case Token::INSTANCEOF: {
+ VisitForValue(expr->right(), kStack);
InstanceofStub stub;
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(zero, true_label_); // The stub returns 0 for true.
- __ jmp(false_label_);
+ __ j(zero, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
break;
}
default: {
+ VisitForValue(expr->right(), kAccumulator);
Condition cc = no_condition;
bool strict = false;
switch (expr->op()) {
@@ -1587,29 +1691,26 @@
// Fall through
case Token::EQ:
cc = equal;
- __ pop(eax);
__ pop(edx);
break;
case Token::LT:
cc = less;
- __ pop(eax);
__ pop(edx);
break;
case Token::GT:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = less;
- __ pop(edx);
+ __ mov(edx, result_register());
__ pop(eax);
break;
case Token::LTE:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = greater_equal;
- __ pop(edx);
+ __ mov(edx, result_register());
__ pop(eax);
break;
case Token::GTE:
cc = greater_equal;
- __ pop(eax);
__ pop(edx);
break;
case Token::IN:
@@ -1626,64 +1727,78 @@
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow_case, not_taken);
__ cmp(edx, Operand(eax));
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
__ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
- // Convert current context to test context: Post-test code.
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kValue:
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(&done);
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ bind(&done);
- break;
-
- case Expression::kEffect:
- __ bind(&done);
- break;
-
- case Expression::kTest:
- break;
-
- case Expression::kValueTest:
- __ bind(&push_true);
- __ push(Immediate(Factory::true_value()));
- __ jmp(saved_true);
- break;
-
- case Expression::kTestValue:
- __ bind(&push_false);
- __ push(Immediate(Factory::false_value()));
- __ jmp(saved_false);
- break;
- }
- true_label_ = saved_true;
- false_label_ = saved_false;
- // Convert current context to test context: End post-test code.
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ Apply(context_, if_true, if_false);
}
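An aside on the context machinery used above, as an illustrative sketch only: the comparison always emits a branch, and Apply then routes the branch targets according to the expression's context. The enum below is a stand-in for the real Expression contexts, not V8 code.

#include <cstdio>

// Stand-in for Expression::Context; describes where the true branch of a
// comparison ends up for each context, mirroring the switch above.
enum Context { kEffect, kValue, kTest, kValueTest, kTestValue };

const char* TrueTarget(Context context) {
  switch (context) {
    case kEffect:    return "done (result discarded)";
    case kValue:     return "materialize_true (push true, fall through)";
    case kTest:      return "caller's true label";
    case kValueTest: return "materialize_true (push true, then jump)";
    case kTestValue: return "caller's true label";
  }
  return "unreachable";
}

int main() {
  printf("%s\n", TrueTarget(kValue));
  return 0;
}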
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), eax);
+ Apply(context_, eax);
}
-#undef __
+Register FastCodeGenerator::result_register() { return eax; }
+
+
+Register FastCodeGenerator::context_register() { return esi; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+ __ mov(Operand(ebp, frame_offset), value);
+}
+
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FastCodeGenerator::EnterFinallyBlock() {
+ // Cook return address on top of stack (smi encoded Code* delta)
+ ASSERT(!result_register().is(edx));
+ __ mov(edx, Operand(esp, 0));
+ __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ add(edx, Operand(edx)); // Convert to smi.
+ __ mov(Operand(esp, 0), edx);
+ // Store result register while executing finally block.
+ __ push(result_register());
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(edx));
+ // Restore result register from stack.
+ __ pop(result_register());
+ // Uncook return address.
+ __ mov(edx, Operand(esp, 0));
+ __ sar(edx, 1); // Convert smi to int.
+ __ add(Operand(edx), Immediate(masm_->CodeObject()));
+ __ mov(Operand(esp, 0), edx);
+ // And return.
+ __ ret(0);
+}
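A minimal, self-contained sketch of the return-address cooking above, assuming kSmiTag == 0 and kSmiTagSize + kSmiShiftSize == 1 exactly as the ASSERTs require: the code-relative delta is smi-tagged by doubling (the add above), and untagged with an arithmetic shift (the sar).

#include <cassert>
#include <cstdint>

// Sketch only: cook a return address into a GC-safe smi-encoded delta
// from the code object, then uncook it again.
int32_t Cook(int32_t return_address, int32_t code_object) {
  int32_t delta = return_address - code_object;
  return delta + delta;  // Smi-tag by doubling (delta << 1).
}

int32_t Uncook(int32_t cooked, int32_t code_object) {
  return (cooked >> 1) + code_object;  // sar by 1, then rebase.
}

int main() {
  const int32_t code = 0x08001000;
  const int32_t ret = code + 0x2a4;
  assert(Uncook(Cook(ret, code), code) == ret);
  return 0;
}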
+
+
+#undef __
} } // namespace v8::internal
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 6988fe0..5658605 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -48,9 +48,13 @@
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
- Register r0, Register r1, Register r2,
- Register name) {
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register name,
+ DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
@@ -86,11 +90,15 @@
__ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
- // Check that the properties array is a dictionary.
+ // Load properties array.
__ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(not_equal, miss_label);
+
+ // Check that the properties array is a dictionary.
+ if (check_dictionary == CHECK_DICTIONARY) {
+ __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(not_equal, miss_label);
+ }
// Compute the capacity mask.
const int kCapacityOffset =
@@ -223,7 +231,8 @@
// -- esp[4] : name
// -- esp[8] : receiver
// -----------------------------------
- Label slow, check_string, index_int, index_string, check_pixel_array;
+ Label slow, check_string, index_int, index_string;
+ Label check_pixel_array, probe_dictionary;
// Load name and receiver.
__ mov(eax, Operand(esp, kPointerSize));
@@ -302,17 +311,73 @@
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
- // If the string is a symbol, do a quick inline probe of the receiver's
- // dictionary, if it exists.
+ // Is the string a symbol?
__ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
__ test(ebx, Immediate(kIsSymbolMask));
__ j(zero, &slow, not_taken);
- // Probe the dictionary leaving result in ecx.
- GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary leaving result in ecx.
+ __ mov(ebx, FieldOperand(ecx, JSObject::kPropertiesOffset));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(Factory::hash_table_map()));
+ __ j(equal, &probe_dictionary);
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(edx, ebx);
+ __ shr(edx, KeyedLookupCache::kMapHashShift);
+ __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+ __ shr(eax, String::kHashShift);
+ __ xor_(edx, Operand(eax));
+ __ and_(edx, KeyedLookupCache::kCapacityMask);
+
+ // Load the key (consisting of map and symbol) from the cache and
+ // check for match.
+ ExternalReference cache_keys
+ = ExternalReference::keyed_lookup_cache_keys();
+ __ mov(edi, edx);
+ __ shl(edi, kPointerSizeLog2 + 1);
+ __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+ __ j(not_equal, &slow);
+ __ add(Operand(edi), Immediate(kPointerSize));
+ __ mov(edi, Operand::StaticArray(edi, times_1, cache_keys));
+ __ cmp(edi, Operand(esp, kPointerSize));
+ __ j(not_equal, &slow);
+
+ // Get field offset and check that it is an in-object property.
+ ExternalReference cache_field_offsets
+ = ExternalReference::keyed_lookup_cache_field_offsets();
+ __ mov(eax,
+ Operand::StaticArray(edx, times_pointer_size, cache_field_offsets));
+ __ movzx_b(edx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+ __ cmp(eax, Operand(edx));
+ __ j(above_equal, &slow);
+
+ // Load in-object property.
+ __ sub(eax, Operand(edx));
+ __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ add(eax, Operand(edx));
+ __ mov(eax, FieldOperand(ecx, eax, times_pointer_size, 0));
+ __ ret(0);
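Before the dictionary fallback below, a hedged sketch of the keyed lookup cache probed above. The capacity and shift amounts here are assumptions for illustration, not the real KeyedLookupCache declaration; the hash mixes map-pointer bits with the symbol's string hash just as the shr/xor/and sequence does.

#include <cstdint>

// Illustrative stand-in: parallel arrays of (map, symbol) keys and
// in-object field offsets, indexed by a hash of map and string hash.
struct KeyedLookupCacheSketch {
  static const int kCapacityMask = 63;  // Assumed power-of-two capacity - 1.

  struct Key { uintptr_t map; uintptr_t symbol; };
  Key keys[kCapacityMask + 1];
  int field_offsets[kCapacityMask + 1];

  static int Hash(uintptr_t map, uint32_t string_hash) {
    return static_cast<int>((map >> 2) ^ string_hash) & kCapacityMask;
  }

  // Returns the cached field offset, or -1 on a miss (the generated code
  // jumps to &slow instead).
  int Lookup(uintptr_t map, uintptr_t symbol, uint32_t string_hash) const {
    int index = Hash(map, string_hash);
    if (keys[index].map == map && keys[index].symbol == symbol) {
      return field_offsets[index];
    }
    return -1;
  }
};

int main() {
  KeyedLookupCacheSketch cache = {};
  int index = KeyedLookupCacheSketch::Hash(0x1000, 7);
  cache.keys[index].map = 0x1000;
  cache.keys[index].symbol = 0x2000;
  cache.field_offsets[index] = 3;
  return cache.Lookup(0x1000, 0x2000, 7) == 3 ? 0 : 1;
}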
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ GenerateDictionaryLoad(masm,
+ &slow,
+ ebx,
+ ecx,
+ edx,
+ eax,
+ DICTIONARY_CHECK_DONE);
GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
__ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
+
// If the hash field contains an array index pick it out. The assert checks
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
@@ -327,6 +392,48 @@
}
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label miss, index_ok;
+
+ // Pop return address.
+ // Performing the load early is better in the common case.
+ __ pop(eax);
+
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+ __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ test(ecx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &miss);
+
+ // Check if key is a smi or a heap number.
+ __ mov(edx, Operand(esp, 0));
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &index_ok);
+ __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(ecx, Factory::heap_number_map());
+ __ j(not_equal, &miss);
+
+ __ bind(&index_ok);
+ // Duplicate receiver and key since they are expected on the stack after
+ // the KeyedLoadIC call.
+ __ push(ebx); // receiver
+ __ push(edx); // key
+ __ push(eax); // return address
+ __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ __ push(eax);
+ GenerateMiss(masm);
+}
+
+
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
@@ -824,13 +931,16 @@
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Get the name of the function from the stack; 2 ~ return address, receiver
- __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Probe the stub cache.
Code::Flags flags =
@@ -876,7 +986,7 @@
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
@@ -884,27 +994,34 @@
int argc,
bool is_global_object,
Label* miss) {
- // Search dictionary - put result in register edx.
- GenerateDictionaryLoad(masm, miss, eax, edx, ebx, ecx);
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
- // Move the result to register edi and check that it isn't a smi.
- __ mov(edi, Operand(edx));
- __ test(edx, Immediate(kSmiTagMask));
+ // Search dictionary - put result in register edi.
+ __ mov(edi, edx);
+ GenerateDictionaryLoad(masm, miss, eax, edi, ebx, ecx, CHECK_DICTIONARY);
+
+ // Check that the result is not a smi.
+ __ test(edi, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
- // Check that the value is a JavaScript function.
- __ CmpObjectType(edx, JS_FUNCTION_TYPE, edx);
+ // Check that the value is a JavaScript function, fetching its map into eax.
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
__ j(not_equal, miss, not_taken);
- // Check that the function has been loaded.
- __ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
- __ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
- __ test(edx, Immediate(1 << Map::kNeedsLoading));
+ // Check that the function has been loaded. eax holds function's map.
+ __ mov(eax, FieldOperand(eax, Map::kBitField2Offset));
+ __ test(eax, Immediate(1 << Map::kNeedsLoading));
__ j(not_zero, miss, not_taken);
- // Patch the receiver with the global proxy if necessary.
+ // Patch the receiver on stack with the global proxy if necessary.
if (is_global_object) {
- __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
@@ -917,14 +1034,17 @@
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss, global_object, non_global_object;
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Get the name of the function from the stack; 2 ~ return address, receiver.
- __ mov(ecx, Operand(esp, (argc + 2) * kPointerSize));
// Check that the receiver isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@@ -973,33 +1093,33 @@
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
-void CallIC::Generate(MacroAssembler* masm,
- int argc,
- const ExternalReference& f) {
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
// Get the receiver of the function from the stack; 1 ~ return address.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Get the name of the function to call from the stack.
- // 2 ~ receiver, return address.
- __ mov(ebx, Operand(esp, (argc + 2) * kPointerSize));
// Enter an internal frame.
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ push(edx);
- __ push(ebx);
+ __ push(ecx);
// Call the entry.
CEntryStub stub(1);
__ mov(eax, Immediate(2));
- __ mov(ebx, Immediate(f));
+ __ mov(ebx, Immediate(ExternalReference(IC_Utility(kCallIC_Miss))));
__ CallStub(&stub);
// Move result to edi and exit the internal frame.
@@ -1011,11 +1131,11 @@
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // receiver
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &invoke, not_taken);
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, JS_GLOBAL_OBJECT_TYPE);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, &global);
- __ cmp(ecx, JS_BUILTINS_OBJECT_TYPE);
+ __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
__ j(not_equal, &invoke);
// Patch the receiver on the stack.
@@ -1088,7 +1208,7 @@
// Search the dictionary placing the result in eax.
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
+ GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx, CHECK_DICTIONARY);
GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
__ ret(0);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b91caa8..d7c7d3a 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -325,6 +325,17 @@
}
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ test(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
+
+
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
@@ -504,6 +515,13 @@
}
+void MacroAssembler::PopTryHandler() {
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
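A small sketch of what PopTryHandler does, with an assumed handler-record layout (the real one is defined by StackHandlerConstants): the first word links to the next handler and is popped back into the chain head, and the rest of the record is simply discarded from the stack.

#include <cassert>
#include <cstddef>

// Stand-in handler record: next-link first, then the remaining state
// words (kSize - kPointerSize bytes in the real layout).
struct Handler {
  Handler* next;
  void* state[3];  // Assumed size, for illustration only.
};

// Mirrors the pop into Top::k_handler_address plus the esp adjustment.
void PopTryHandlerSketch(Handler** chain_head) {
  *chain_head = (*chain_head)->next;
}

int main() {
  Handler outer = { NULL };
  Handler inner = { &outer };
  Handler* head = &inner;
  PopTryHandlerSketch(&head);
  assert(head == &outer);
  return 0;
}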
+
+
Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch,
@@ -722,13 +740,13 @@
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- or_(Operand(result), Immediate(kHeapObjectTag));
+ lea(result, Operand(result, kHeapObjectTag));
}
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
}
@@ -752,13 +770,13 @@
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- or_(Operand(result), Immediate(kHeapObjectTag));
+ lea(result, Operand(result, kHeapObjectTag));
}
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
}
@@ -783,13 +801,13 @@
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- or_(Operand(result), Immediate(kHeapObjectTag));
+ lea(result, Operand(result, kHeapObjectTag));
}
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
}
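The switch from or_ to lea, together with tagging before the allocation top is updated, relies on the allocation result being aligned. A quick sketch, assuming kHeapObjectTag == 1 as on ia32, showing that the addition lea computes equals the bitwise OR; lea also leaves the flags untouched, which or_ does not.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kHeapObjectTag = 1;  // Assumed ia32 tag value.
  // New-space results are pointer aligned, so the low bits are zero and
  // lea(result, Operand(result, kHeapObjectTag)) == or_(result, tag).
  uintptr_t result = 0x20f0;
  assert((result | kHeapObjectTag) == result + kHeapObjectTag);
  return 0;
}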
@@ -834,10 +852,9 @@
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, length);
ASSERT(kShortSize == 2);
- shl(scratch1, 1);
- add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+ // scratch1 = length * 2 + kObjectAlignmentMask.
+ lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
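The lea above folds the old mov/shl/add into one instruction: Operand(length, length, times_1, mask) computes length + length + mask. A sketch of the full size computation, assuming ia32's 4-byte object alignment (kObjectAlignmentMask == 3) and the kShortSize == 2 asserted above:

#include <cassert>

// Bytes needed for the characters of a two-byte string, rounded up to
// object alignment: (length * 2 + mask) & ~mask.
int TwoByteCharsSize(int length) {
  const int kObjectAlignmentMask = 3;  // Assumed: 4-byte alignment on ia32.
  return (length * 2 + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}

int main() {
  assert(TwoByteCharsSize(1) == 4);
  assert(TwoByteCharsSize(2) == 4);
  assert(TwoByteCharsSize(3) == 8);
  return 0;
}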
// Allocate two byte string in new space.
@@ -1016,17 +1033,37 @@
void MacroAssembler::CallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET);
}
+Object* MacroAssembler::TryCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ Object* result = stub->TryGetCode();
+ if (!result->IsFailure()) {
+ call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+ }
+ return result;
+}
+
+
void MacroAssembler::TailCallStub(CodeStub* stub) {
- ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
}
+Object* MacroAssembler::TryTailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs.
+ Object* result = stub->TryGetCode();
+ if (!result->IsFailure()) {
+ jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+ }
+ return result;
+}
+
+
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -1046,6 +1083,12 @@
}
+Object* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
+ int num_arguments) {
+ return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
@@ -1062,6 +1105,22 @@
}
+Object* MacroAssembler::TryCallRuntime(Runtime::Function* f,
+ int num_arguments) {
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ IllegalOperation(num_arguments);
+ // Since we did not call the stub, there was no allocation failure.
+ // Return some non-failure object.
+ return Heap::undefined_value();
+ }
+
+ Runtime::FunctionId function_id =
+ static_cast<Runtime::FunctionId>(f->stub_id);
+ RuntimeStub stub(function_id, num_arguments);
+ return TryCallStub(&stub);
+}
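The Try* variants added here all follow one convention: never allocate (and possibly GC) inside the assembler; return a retry-after-GC failure instead and let the caller propagate it. A self-contained sketch with stand-in types, not the real Object/Failure classes:

#include <cstdio>

// Stand-ins: a result is either usable or a failure asking the caller to
// retry after GC.
struct Object {
  bool is_failure;
  bool IsFailure() const { return is_failure; }
};

Object kRetryAfterGC = { true };
Object kCodeObject = { false };

// Mirrors TryGetCode: returns the code, or a failure with no GC performed.
Object* TryGetCode(bool allocation_succeeded) {
  return allocation_succeeded ? &kCodeObject : &kRetryAfterGC;
}

// Mirrors the TryCallStub/TryCallRuntime call sites: check and propagate.
Object* TryEmitStubCall(bool allocation_succeeded) {
  Object* result = TryGetCode(allocation_succeeded);
  if (result->IsFailure()) return result;  // Caller handles the retry.
  // ... would emit the call here ...
  return result;
}

int main() {
  printf("failure propagated: %d\n", TryEmitStubCall(false)->IsFailure());
  return 0;
}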
+
+
void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
int num_arguments,
int result_size) {
@@ -1094,7 +1153,10 @@
}
-void MacroAssembler::PopHandleScope(Register saved, Register scratch) {
+Object* MacroAssembler::PopHandleScopeHelper(Register saved,
+ Register scratch,
+ bool gc_allowed) {
+ Object* result = NULL;
ExternalReference extensions_address =
ExternalReference::handle_scope_extensions_address();
Label write_back;
@@ -1104,7 +1166,12 @@
// Calling a runtime function messes with registers, so we save and
// restore any register we're asked not to change.
if (saved.is_valid()) push(saved);
- CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
+ if (gc_allowed) {
+ CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
+ } else {
+ result = TryCallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
+ if (result->IsFailure()) return result;
+ }
if (saved.is_valid()) pop(saved);
bind(&write_back);
@@ -1117,6 +1184,18 @@
pop(scratch);
shr(scratch, kSmiTagSize);
mov(Operand::StaticVariable(extensions_address), scratch);
+
+ return result;
+}
+
+
+void MacroAssembler::PopHandleScope(Register saved, Register scratch) {
+ PopHandleScopeHelper(saved, scratch, true);
+}
+
+
+Object* MacroAssembler::TryPopHandleScope(Register saved, Register scratch) {
+ return PopHandleScopeHelper(saved, scratch, false);
}
@@ -1301,7 +1380,6 @@
JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
mov(edi, FieldOperand(edx, builtins_offset));
-
return Builtins::GetCode(id, resolved);
}
@@ -1331,6 +1409,18 @@
}
+void MacroAssembler::Drop(int stack_elements) {
+ if (stack_elements > 0) {
+ add(Operand(esp), Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+ mov(dst, value);
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index a41d42e..ceecebf 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -33,10 +33,13 @@
namespace v8 {
namespace internal {
+// Convenience for platform-independent signatures. We do not normally
+// distinguish memory operands from other operands on ia32.
+typedef Operand MemOperand;
+
// Forward declaration.
class JumpTarget;
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -138,10 +141,28 @@
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if the object in register heap_object is a string. Afterwards the
+ // register map contains the object map and the register instance_type
+  // contains the instance type. The registers map and instance_type can be
+  // the same, in which case the register contains the instance type
+  // afterwards. Either of the registers map and instance_type can be the
+  // same as heap_object.
+ Condition IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type);
+
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
+ // Smi tagging support.
+ void SmiTag(Register reg) {
+ ASSERT(kSmiTag == 0);
+ shl(reg, kSmiTagSize);
+ }
+ void SmiUntag(Register reg) {
+ sar(reg, kSmiTagSize);
+ }
+
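A runnable sketch of the tagging scheme these helpers encode, assuming kSmiTag == 0 and kSmiTagSize == 1: tagging shifts the value left one bit (so the low bit of every smi is clear), and untagging shifts it back arithmetically so the sign survives.

#include <cassert>
#include <cstdint>

// Multiplication/division by 2 are what shl/sar compute here; division is
// exact for valid (even) smis, so no rounding difference arises.
inline int32_t SmiTagSketch(int32_t value) { return value * 2; }
inline int32_t SmiUntagSketch(int32_t smi) { return smi / 2; }

int main() {
  assert(SmiUntagSketch(SmiTagSketch(-42)) == -42);  // Sign is preserved.
  assert((SmiTagSketch(7) & 1) == 0);                // Tag bit is clear.
  return 0;
}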
// ---------------------------------------------------------------------------
// Exception handling
@@ -149,6 +170,8 @@
// address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -285,12 +308,22 @@
// ---------------------------------------------------------------------------
// Runtime calls
- // Call a code stub.
+ // Call a code stub. Generate the code if necessary.
void CallStub(CodeStub* stub);
- // Tail call a code stub (jump).
+ // Call a code stub and return the code object called. Try to generate
+ // the code if necessary. Do not perform a GC but instead return a retry
+ // after GC failure.
+ Object* TryCallStub(CodeStub* stub);
+
+ // Tail call a code stub (jump). Generate the code if necessary.
void TailCallStub(CodeStub* stub);
+ // Tail call a code stub (jump) and return the code object called. Try to
+ // generate the code if necessary. Do not perform a GC but instead return
+ // a retry after GC failure.
+ Object* TryTailCallStub(CodeStub* stub);
+
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
@@ -298,9 +331,17 @@
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
+ // Call a runtime function, returning the RuntimeStub object called.
+ // Try to generate the stub code if necessary. Do not perform a GC
+ // but instead return a retry after GC failure.
+ Object* TryCallRuntime(Runtime::Function* f, int num_arguments);
+
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
+ // Convenience function: Same as above, but takes the fid instead.
+ Object* TryCallRuntime(Runtime::FunctionId id, int num_arguments);
+
// Tail call of a runtime routine (jump).
// Like JumpToRuntime, but also takes care of passing the number
// of arguments.
@@ -314,6 +355,10 @@
// ensuring that the saved register, if it is not no_reg, is left unchanged.
void PopHandleScope(Register saved, Register scratch);
+ // As PopHandleScope, but does not perform a GC. Instead, returns a
+ // retry after GC failure object if GC is necessary.
+ Object* TryPopHandleScope(Register saved, Register scratch);
+
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext);
@@ -323,6 +368,14 @@
void Ret();
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the esp register.
+ void Drop(int element_count);
+
+ void Call(Label* target) { call(target); }
+
+ void Move(Register target, Handle<Object> value);
+
struct Unresolved {
int pc;
uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
@@ -400,6 +453,13 @@
Register scratch,
AllocationFlags flags);
void UpdateAllocationTopHelper(Register result_end, Register scratch);
+
+  // Helper for PopHandleScope. If gc_allowed, may perform a GC and
+  // returns NULL. Otherwise, does not perform a GC and may return a
+  // failure object indicating an allocation failure.
+ Object* PopHandleScopeHelper(Register saved,
+ Register scratch,
+ bool gc_allowed);
};
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 2e13d8a..4af59dd 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -55,13 +55,17 @@
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - at_start (if 1, start at start of string, if 0, don't)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - void* input_string (location of a handle containing the string)
+ * - direct_call (if 1, direct call from JavaScript code, if 0
+ * call through the runtime system)
+ * - stack_area_base (High end of the memory area to use as
+ * backtracking stack)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
+ * - start index (character index of start)
+ * - String* input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
* - return address
* ebp-> - old ebp
@@ -81,11 +85,13 @@
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
+ * int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
- * byte* stack_area_base)
+ * byte* stack_area_base,
+ * bool direct_call)
*/
#define __ ACCESS_MASM(masm_)
@@ -471,8 +477,6 @@
bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
@@ -481,17 +485,12 @@
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
Label success;
__ cmp(current_character(), ' ');
__ j(equal, &success);
// Check range 0x09..0x0d
- __ sub(Operand(current_character()), Immediate('\t'));
- __ cmp(current_character(), '\r' - '\t');
+ __ lea(eax, Operand(current_character(), -'\t'));
+ __ cmp(eax, '\r' - '\t');
BranchOrBacktrack(above, on_no_match);
__ bind(&success);
return true;
@@ -499,72 +498,105 @@
return false;
case 'S':
// Match non-space characters.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmp(current_character(), ' ');
BranchOrBacktrack(equal, on_no_match);
- __ sub(Operand(current_character()), Immediate('\t'));
- __ cmp(current_character(), '\r' - '\t');
+ __ lea(eax, Operand(current_character(), -'\t'));
+ __ cmp(eax, '\r' - '\t');
BranchOrBacktrack(below_equal, on_no_match);
return true;
}
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ sub(Operand(current_character()), Immediate('0'));
- __ cmp(current_character(), '9' - '0');
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ sub(Operand(current_character()), Immediate('0'));
- __ cmp(current_character(), '9' - '0');
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ xor_(Operand(current_character()), Immediate(0x01));
+ __ mov(Operand(eax), current_character());
+ __ xor_(Operand(eax), Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(current_character()), Immediate(0x0b));
- __ cmp(current_character(), 0x0c - 0x0b);
+ __ sub(Operand(eax), Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(current_character()), Immediate(0x2028 - 0x0b));
- __ cmp(current_character(), 1);
+ __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
return true;
}
- case '*':
- // Match any character.
- if (check_offset) {
- CheckPosition(cp_offset, on_no_match);
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(Operand(current_character()), Immediate('z'));
+ BranchOrBacktrack(above, on_no_match);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(zero, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmp(Operand(current_character()), Immediate('z'));
+ __ j(above, &done);
+ }
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ ExternalReference word_map = ExternalReference::re_word_character_map();
+ __ test_b(current_character(),
+ Operand::StaticArray(current_character(), times_1, word_map));
+ BranchOrBacktrack(not_zero, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
}
return true;
- // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ }
+ // Non-standard classes (with no syntactic shorthand) used internally.
+ case '*':
+ // Match any character.
+ return true;
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
+ // The opposite of '.'.
+ __ mov(Operand(eax), current_character());
+ __ xor_(Operand(eax), Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(Operand(eax), Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(above, on_no_match);
+ } else {
+ Label done;
+ BranchOrBacktrack(below_equal, &done);
+ ASSERT_EQ(UC16, mode_);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 1);
+ BranchOrBacktrack(above, on_no_match);
+ __ bind(&done);
+ }
+ return true;
+ }
+ // No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
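Two of the tricks above are worth restating in plain C++ (a sketch under stated assumptions, not V8 code). For '.' and 'n': XOR with 0x01 maps '\n' (0x0a) to 0x0b and '\r' (0x0d) to 0x0c, so a single unsigned range check covers both, and the same residue extends to U+2028/U+2029. For 'w' and 'W': a 128-entry byte table answers membership for all ASCII once characters above 'z' are screened out; the table initializer below is an assumption matching the regexp definition of \w, [A-Za-z0-9_].

#include <cassert>
#include <cstdint>

// Newline test: true for 0x0a, 0x0d, 0x2028, 0x2029 (the complement of '.').
bool IsNewline(uint16_t c) {
  uint32_t x = static_cast<uint32_t>(c ^ 0x01) - 0x0b;
  if (x <= 0x0c - 0x0b) return true;              // '\n' or '\r'.
  return x - (0x2028 - 0x0b) <= 0x2029 - 0x2028;  // U+2028 or U+2029.
}

// Word-character test backed by a 128-entry table, as re_word_character_map
// is used above.
static uint8_t word_map[128];

void InitWordMap() {
  for (int c = 0; c < 128; c++) {
    word_map[c] = (c == '_' ||
                   (c >= '0' && c <= '9') ||
                   (c >= 'A' && c <= 'Z') ||
                   (c >= 'a' && c <= 'z'));
  }
}

bool IsWordChar(uint16_t c) {
  if (c > 'z') return false;  // Mirrors the UC16 pre-check.
  return word_map[c] != 0;
}

int main() {
  InitWordMap();
  assert(IsNewline('\n') && IsNewline('\r'));
  assert(IsNewline(0x2028) && IsNewline(0x2029));
  assert(!IsNewline('a'));
  assert(IsWordChar('_') && IsWordChar('A') && !IsWordChar(' '));
  return 0;
}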
@@ -942,6 +974,12 @@
// If not real stack overflow the stack guard was used to interrupt
// execution for another purpose.
+  // If this is a direct call from JavaScript, retry the RegExp forcing the
+  // call through the runtime system. Currently the direct call cannot
+  // handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
// Prepare for possible GC.
HandleScope handles;
Handle<Code> code_handle(re_code);
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 5ffd462..8e7a6a5 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -78,10 +78,7 @@
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
- Label* on_no_match);
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
@@ -128,6 +125,7 @@
static const int kRegisterOutput = kInputEnd + kPointerSize;
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index ce7ed0e..3ebd2e6 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -52,9 +52,9 @@
};
// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 425c51d..5961294 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -152,11 +152,10 @@
}
-template <typename Pushable>
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
- Pushable name,
+ Register name,
JSObject* holder_obj) {
__ push(receiver);
__ push(holder);
@@ -237,7 +236,7 @@
// Load length from the string and convert to a smi.
__ bind(&load_length);
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
__ ret(0);
// Check if the object is a JSValue wrapper.
@@ -285,11 +284,10 @@
}
-template <class Pushable>
static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
- Pushable name,
+ Register name,
JSObject* holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
@@ -495,8 +493,8 @@
class CallInterceptorCompiler BASE_EMBEDDED {
public:
- explicit CallInterceptorCompiler(const ParameterCount& arguments)
- : arguments_(arguments), argc_(arguments.immediate()) {}
+ CallInterceptorCompiler(const ParameterCount& arguments, Register name)
+ : arguments_(arguments), argc_(arguments.immediate()), name_(name) {}
void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
@@ -527,17 +525,17 @@
}
__ EnterInternalFrame();
- __ push(holder); // save the holder
+ __ push(holder); // Save the holder.
+ __ push(name_); // Save the name.
- CompileCallLoadPropertyWithInterceptor(
- masm,
- receiver,
- holder,
- // Under EnterInternalFrame this refers to name.
- Operand(ebp, (argc_ + 3) * kPointerSize),
- holder_obj);
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
- __ pop(receiver); // restore holder
+ __ pop(name_); // Restore the name.
+ __ pop(receiver); // Restore the holder.
__ LeaveInternalFrame();
__ cmp(eax, Factory::no_interceptor_result_sentinel());
@@ -577,11 +575,13 @@
JSObject* holder_obj,
Label* miss_label) {
__ EnterInternalFrame();
+ // Save the name_ register across the call.
+ __ push(name_);
PushInterceptorArguments(masm,
receiver,
holder,
- Operand(ebp, (argc_ + 3) * kPointerSize),
+ name_,
holder_obj);
ExternalReference ref = ExternalReference(
@@ -592,12 +592,15 @@
CEntryStub stub(1);
__ CallStub(&stub);
+ // Restore the name_ register.
+ __ pop(name_);
__ LeaveInternalFrame();
}
private:
const ParameterCount& arguments_;
int argc_;
+ Register name_;
};
@@ -754,7 +757,7 @@
}
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -762,7 +765,8 @@
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss) {
+ Label* miss,
+ Failure** failure) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
@@ -798,14 +802,30 @@
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ApiGetterEntryStub stub(callback_handle, &fun);
- __ CallStub(&stub);
+ // Emitting a stub call may try to allocate (if the code is not
+ // already generated). Do not allow the assembler to perform a
+ // garbage collection but instead return the allocation failure
+ // object.
+ Object* result = masm()->TryCallStub(&stub);
+ if (result->IsFailure()) {
+ *failure = Failure::cast(result);
+ return false;
+ }
// We need to avoid using eax since that now holds the result.
Register tmp = other.is(eax) ? reg : other;
- __ PopHandleScope(eax, tmp);
+ // Emitting PopHandleScope may try to allocate. Do not allow the
+ // assembler to perform a garbage collection but instead return a
+ // failure object.
+ result = masm()->TryPopHandleScope(eax, tmp);
+ if (result->IsFailure()) {
+ *failure = Failure::cast(result);
+ return false;
+ }
__ LeaveInternalFrame();
__ ret(0);
+ return true;
}
@@ -885,6 +905,11 @@
int index,
String* name) {
// ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -899,7 +924,7 @@
// Do the right check and compute the holder register.
Register reg =
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, ecx, name, &miss);
+ ebx, eax, name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -935,6 +960,11 @@
String* name,
CheckType check) {
// ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -956,7 +986,7 @@
case RECEIVER_MAP_CHECK:
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, ecx, name, &miss);
+ ebx, eax, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@@ -967,56 +997,71 @@
break;
case STRING_CHECK:
- // Check that the object is a two-byte string or a symbol.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &miss, not_taken);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- ecx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
- ebx, edx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a string or a symbol.
+ __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+ __ cmp(eax, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &miss, not_taken);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+ }
break;
case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &fast, taken);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &miss, not_taken);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- ecx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
- ebx, edx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &fast, taken);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
+ __ j(not_equal, &miss, not_taken);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+ }
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, Factory::true_value());
- __ j(equal, &fast, taken);
- __ cmp(edx, Factory::false_value());
- __ j(not_equal, &miss, not_taken);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- ecx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
- ebx, edx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ cmp(edx, Factory::true_value());
+ __ j(equal, &fast, taken);
+ __ cmp(edx, Factory::false_value());
+ __ j(not_equal, &miss, not_taken);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+ }
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, ecx, name, &miss);
+ ebx, eax, name, &miss);
// Make sure object->HasFastElements().
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -1059,6 +1104,11 @@
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -1071,7 +1121,7 @@
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- CallInterceptorCompiler compiler(arguments());
+ CallInterceptorCompiler compiler(arguments(), ecx);
CompileLoadInterceptor(&compiler,
this,
masm(),
@@ -1081,7 +1131,7 @@
&lookup,
edx,
ebx,
- ecx,
+ edi,
&miss);
// Restore receiver.
@@ -1120,6 +1170,11 @@
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
+ // -- ecx : name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
Label miss;
@@ -1138,15 +1193,32 @@
}
// Check that the maps haven't changed.
- CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
+ CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
__ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
- __ j(not_equal, &miss, not_taken);
+ if (Heap::InNewSpace(function)) {
+ // We can't embed a pointer to a function in new space so we have
+ // to verify that the shared function info is unchanged. This has
+ // the nice side effect that multiple closures based on the same
+ // function can all use this call IC. Before we load through the
+ // function, we have to verify that it still is a function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &miss, not_taken);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &miss, not_taken);
+
+ // Check the shared function info. Make sure it hasn't changed.
+ __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
+ Immediate(Handle<SharedFunctionInfo>(function->shared())));
+ __ j(not_equal, &miss, not_taken);
+ } else {
+ __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+ __ j(not_equal, &miss, not_taken);
+ }
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
@@ -1420,10 +1492,10 @@
}
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
JSObject* holder,
- AccessorInfo* callback,
- String* name) {
+ AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -1432,8 +1504,11 @@
Label miss;
__ mov(eax, Operand(esp, kPointerSize));
- GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
- callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1597,8 +1672,11 @@
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
- GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
- callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1837,17 +1915,23 @@
// depending on the this.x = ...; assignment in the function.
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed;
- // Set the property to undefined.
- __ mov(Operand(edx, i * kPointerSize), edi);
// Check if the argument assigned to the property is actually passed.
+      // If the argument is not passed, the property is set to undefined;
+      // otherwise it is found on the stack.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ mov(ebx, edi);
__ cmp(eax, arg_number);
- __ j(below_equal, ¬_passed);
- // Argument passed - find it on the stack.
- __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
+ } else {
+ Label not_passed;
+ __ j(below_equal, ¬_passed);
+ __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+ __ bind(¬_passed);
+ }
+ // Store value in the property.
__ mov(Operand(edx, i * kPointerSize), ebx);
- __ bind(¬_passed);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
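The cmov rewrite above replaces a forward branch with a conditional move. A sketch of the selection it performs: the ternary below is exactly what cmov(above, ...) computes after cmp(eax, arg_number), with eax holding the actual argument count and 'above' being the unsigned greater-than condition.

#include <cassert>

// Value stored into the property: the passed argument if it exists,
// otherwise undefined (modelled here as -1).
int SelectPropertyValue(unsigned argc, unsigned arg_number,
                        int undefined, int passed_value) {
  return argc > arg_number ? passed_value : undefined;
}

int main() {
  assert(SelectPropertyValue(2, 0, -1, 42) == 42);  // Argument was passed.
  assert(SelectPropertyValue(1, 3, -1, 42) == -1);  // Missing: undefined.
  return 0;
}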
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index e770cdd..104d187 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -513,13 +513,33 @@
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(undefined));
+ if (count == 1) {
+ __ push(Immediate(undefined));
+ } else if (count < kLocalVarBound) {
+ // For less locals the unrolled loop is more compact.
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ Set(temp.reg(), Immediate(undefined));
+ for (int i = 0; i < count; i++) {
+ __ push(temp.reg());
+ }
+ } else {
+ // For more locals a loop in generated code is more compact.
+ Label alloc_locals_loop;
+ Result cnt = cgen()->allocator()->Allocate();
+ Result tmp = cgen()->allocator()->Allocate();
+ ASSERT(cnt.is_valid());
+ ASSERT(tmp.is_valid());
+ __ mov(cnt.reg(), Immediate(count));
+ __ mov(tmp.reg(), Immediate(undefined));
+ __ bind(&alloc_locals_loop);
+ __ push(tmp.reg());
+ __ dec(cnt.reg());
+ __ j(not_zero, &alloc_locals_loop);
+ }
for (int i = 0; i < count; i++) {
elements_.Add(initial_value);
stack_pointer_++;
- __ push(temp.reg());
}
}
}
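The three-way split above is purely a code-size versus speed trade-off. A sketch, with kLocalVarBound = 10 as declared in the accompanying header change:

#include <cstdio>

const int kLocalVarBound = 10;  // From virtual-frame-ia32.h below.

// Which instruction sequence AllocateStackSlots emits for `count` locals.
const char* LocalsInitStrategy(int count) {
  if (count == 1) return "single push of an immediate";
  if (count < kLocalVarBound) return "unrolled pushes from a register";
  return "dec/jnz loop in generated code";
}

int main() {
  printf("%s\n", LocalsInitStrategy(12));
  return 0;
}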
@@ -925,14 +945,17 @@
Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
int arg_count,
int loop_nesting) {
- // Arguments, receiver, and function name are on top of the frame.
- // The IC expects them on the stack. It does not drop the function
- // name slot (but it does drop the rest).
+ // Function name, arguments, and receiver are on top of the frame.
+ // The IC expects the name in ecx and the rest on the stack and
+ // drops them all.
InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = cgen()->ComputeCallInitialize(arg_count, in_loop);
// Spill args, receiver, and function. The call will drop args and
// receiver.
- PrepareForCall(arg_count + 2, arg_count + 1);
+ Result name = Pop();
+ PrepareForCall(arg_count + 1, arg_count + 1); // Arguments + receiver.
+ name.ToRegister(ecx);
+ name.Unuse();
return RawCallCodeObject(ic, mode);
}
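A sketch of the calling convention this change establishes for call ICs, with offsets assuming ia32's 4-byte kPointerSize: the name rides in ecx, and the arguments and receiver stay on the stack exactly as the state comments added throughout this patch describe.

#include <cassert>

const int kPointerSize = 4;  // ia32.

// Stack offsets relative to esp at IC entry: arg[n] (zero-based) sits at
// (argc - n) * kPointerSize, the receiver below all arguments.
int ArgOffset(int argc, int n) { return (argc - n) * kPointerSize; }
int ReceiverOffset(int argc) { return (argc + 1) * kPointerSize; }

int main() {
  assert(ArgOffset(2, 1) == 4);     // Last argument just above return addr.
  assert(ReceiverOffset(2) == 12);  // Receiver pushed first, deepest.
  return 0;
}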
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index 314ea73..d6d55d1 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -199,6 +199,9 @@
// shared return site. Emits code for spills.
void PrepareForReturn();
+  // Number of local variables above which we use a loop for allocating.
+ static const int kLocalVarBound = 10;
+
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@@ -341,9 +344,9 @@
// of the frame. Key and receiver are not dropped.
Result CallKeyedStoreIC();
- // Call call IC. Arguments, reciever, and function name are found
- // on top of the frame. Function name slot is not dropped. The
- // argument count does not include the receiver.
+ // Call call IC. Function name, arguments, and receiver are found on top
+ // of the frame and dropped by the call. The argument count does not
+ // include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
// Allocate and call JS function as constructor. Arguments,
@@ -392,6 +395,8 @@
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result) {
+ // This assert will trigger if you try to push the same value twice.
+ ASSERT(result->is_valid());
if (result->is_register()) {
Push(result->reg());
} else {
diff --git a/src/ic.cc b/src/ic.cc
index 2779356..d823c91 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -378,6 +378,18 @@
return *delegate;
}
+void CallIC::ReceiverToObject(Handle<Object> object) {
+ HandleScope scope;
+ Handle<Object> receiver(object);
+
+ // Change the receiver to the result of calling ToObject on it.
+ const int argc = this->target()->arguments_count();
+ StackFrameLocator locator;
+ JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+ int index = frame->ComputeExpressionsCount() - (argc + 1);
+ frame->SetExpression(index, *Factory::ToObject(object));
+}
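A hedged sketch of the boxing semantics ReceiverToObject implements, using stand-in types: a primitive receiver is replaced in the caller's frame by its wrapper object before the lookup proceeds, mirroring how "abc".foo() sees a String wrapper as its receiver.

#include <cassert>

// Stand-in value model: primitives get wrapped, objects pass through.
struct Value {
  bool is_object;
};

Value ToObject(Value v) {
  v.is_object = true;  // Wrap the primitive (String/Number/Boolean).
  return v;
}

// Mirrors the check in CallIC::LoadFunction: box primitive receivers.
Value PrepareReceiver(Value receiver) {
  if (!receiver.is_object) receiver = ToObject(receiver);
  return receiver;
}

int main() {
  Value primitive = { false };
  assert(PrepareReceiver(primitive).is_object);
  return 0;
}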
+
Object* CallIC::LoadFunction(State state,
Handle<Object> object,
@@ -388,6 +400,10 @@
return TypeError("non_object_property_call", object, name);
}
+ if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
+ ReceiverToObject(object);
+ }
+
// Check if the name is trivially convertible to an index and get
// the element if so.
uint32_t index;
@@ -409,7 +425,7 @@
if (!lookup.IsValid()) {
// If the object does not have the requested property, check which
// exception we need to throw.
- if (is_contextual()) {
+ if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
return TypeError("undefined_method", object, name);
@@ -428,7 +444,7 @@
// If the object does not have the requested property, check which
// exception we need to throw.
if (attr == ABSENT) {
- if (is_contextual()) {
+ if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
return TypeError("undefined_method", object, name);
@@ -628,7 +644,7 @@
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
- if (FLAG_strict || is_contextual()) {
+ if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name);
}
LOG(SuspectReadEvent(*name, *object));
@@ -671,7 +687,7 @@
if (result->IsFailure()) return result;
// If the property is not present, check if we need to throw an
// exception.
- if (attr == ABSENT && is_contextual()) {
+ if (attr == ABSENT && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
return result;
@@ -843,7 +859,7 @@
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
- if (FLAG_strict || is_contextual()) {
+ if (FLAG_strict || IsContextual(object)) {
return ReferenceError("not_defined", name);
}
}
@@ -859,7 +875,7 @@
if (result->IsFailure()) return result;
// If the property is not present, check if we need to throw an
// exception.
- if (attr == ABSENT && is_contextual()) {
+ if (attr == ABSENT && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
return result;
@@ -874,7 +890,9 @@
if (use_ic) {
Code* stub = generic_stub();
- if (object->IsJSObject()) {
+ if (object->IsString() && key->IsNumber()) {
+ stub = string_stub();
+ } else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
stub = external_array_stub(receiver->GetElementsKind());
@@ -1292,16 +1310,6 @@
}
-void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) {
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
-}
-
-
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
-}
-
-
// Used from ic_<arch>.cc.
Object* LoadIC_Miss(Arguments args) {
NoHandleAllocation na;
diff --git a/src/ic.h b/src/ic.h
index 8709088..be7f956 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -33,6 +33,11 @@
namespace v8 {
namespace internal {
+// Flag indicating whether an IC stub needs to check that a backing
+// store is in dictionary case.
+enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
+
+
// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
#define IC_UTIL_LIST(ICU) \
@@ -99,7 +104,16 @@
// Returns if this IC is for contextual (no explicit receiver)
// access to properties.
- bool is_contextual() {
+ bool IsContextual(Handle<Object> receiver) {
+ if (receiver->IsGlobalObject()) {
+ return SlowIsContextual();
+ } else {
+ ASSERT(!SlowIsContextual());
+ return false;
+ }
+ }
+
+ bool SlowIsContextual() {
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
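
The IsContextual/SlowIsContextual split above is a guard-plus-assert pattern: only a global receiver can mean contextual access, so the common case skips the RelocInfo mode computation entirely while debug builds assert that the shortcut agrees with the slow path. The pattern in miniature, with illustrative stand-ins:

    #include <cassert>
    #include <cstdio>

    // Illustrative stand-ins; not V8 API.
    struct Receiver { bool is_global; };

    static bool SlowIsContextual(const Receiver& r) {
      // Imagine an expensive scan here; the sketch just mirrors the flag.
      return r.is_global;
    }

    static bool IsContextual(const Receiver& r) {
      if (r.is_global) {
        return SlowIsContextual(r);  // Only globals can be contextual.
      }
      // Fast path: skip the scan, but check the assumption in debug builds.
      assert(!SlowIsContextual(r));
      return false;
    }

    int main() {
      std::printf("%d %d\n", IsContextual(Receiver{true}) ? 1 : 0,
                  IsContextual(Receiver{false}) ? 1 : 0);
    }
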
@@ -175,16 +189,14 @@
// Code generator routines.
- static void GenerateInitialize(MacroAssembler* masm, int argc);
+ static void GenerateInitialize(MacroAssembler* masm, int argc) {
+ GenerateMiss(masm, argc);
+ }
static void GenerateMiss(MacroAssembler* masm, int argc);
static void GenerateMegamorphic(MacroAssembler* masm, int argc);
static void GenerateNormal(MacroAssembler* masm, int argc);
private:
- static void Generate(MacroAssembler* masm,
- int argc,
- const ExternalReference& f);
-
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
@@ -197,6 +209,8 @@
// Otherwise, it returns the undefined value.
Object* TryCallAsFunction(Object* object);
+ void ReceiverToObject(Handle<Object> object);
+
static void Clear(Address address, Code* target);
friend class IC;
};
@@ -268,6 +282,7 @@
static void GenerateInitialize(MacroAssembler* masm);
static void GeneratePreMonomorphic(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
+ static void GenerateString(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
@@ -301,6 +316,9 @@
static Code* pre_monomorphic_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
}
+ static Code* string_stub() {
+ return Builtins::builtin(Builtins::KeyedLoadIC_String);
+ }
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static void Clear(Address address, Code* target);
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 04d1944..8af472d 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -112,37 +112,6 @@
// Generic RegExp methods. Dispatches to implementation specific methods.
-class OffsetsVector {
- public:
- inline OffsetsVector(int num_registers)
- : offsets_vector_length_(num_registers) {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
- vector_ = NewArray<int>(offsets_vector_length_);
- } else {
- vector_ = static_offsets_vector_;
- }
- }
- inline ~OffsetsVector() {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
- DeleteArray(vector_);
- vector_ = NULL;
- }
- }
- inline int* vector() { return vector_; }
- inline int length() { return offsets_vector_length_; }
-
- private:
- int* vector_;
- int offsets_vector_length_;
- static const int kStaticOffsetsVectorSize = 50;
- static int static_offsets_vector_[kStaticOffsetsVectorSize];
-};
-
-
-int OffsetsVector::static_offsets_vector_[
- OffsetsVector::kStaticOffsetsVectorSize];
-
-
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
Handle<String> flag_str) {
@@ -448,6 +417,14 @@
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
+    // Capture values are relative to the start offset of the search
+    // (previous_index) only. Convert them to be relative to the start
+    // of the string.
+ if (captures_vector[i] >= 0) {
+ captures_vector[i] += previous_index;
+ }
+ if (captures_vector[i + 1] >= 0) {
+ captures_vector[i + 1] += previous_index;
+ }
SetCapture(*array, i, captures_vector[i]);
SetCapture(*array, i + 1, captures_vector[i + 1]);
}
@@ -1431,14 +1408,6 @@
int cp_offset,
bool check_offset,
bool preloaded) {
- if (cc->is_standard() &&
- macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
- cp_offset,
- check_offset,
- on_failure)) {
- return;
- }
-
ZoneList<CharacterRange>* ranges = cc->ranges();
int max_char;
if (ascii) {
@@ -1489,6 +1458,12 @@
macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
}
+ if (cc->is_standard() &&
+ macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
+ on_failure)) {
+ return;
+ }
+
for (int i = 0; i < last_valid_range; i++) {
CharacterRange& range = ranges->at(i);
Label next_range;
@@ -1626,8 +1601,8 @@
}
-int NegativeLookaheadChoiceNode:: EatsAtLeast(int still_to_find,
- int recursion_depth) {
+int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
+ int recursion_depth) {
if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
@@ -2049,6 +2024,12 @@
Label* word,
Label* non_word,
bool fall_through_on_word) {
+ if (assembler->CheckSpecialCharacterClass(
+ fall_through_on_word ? 'w' : 'W',
+ fall_through_on_word ? non_word : word)) {
+ // Optimized implementation available.
+ return;
+ }
assembler->CheckCharacterGT('z', non_word);
assembler->CheckCharacterLT('0', non_word);
assembler->CheckCharacterGT('a' - 1, word);
@@ -2085,17 +2066,60 @@
assembler->LoadCurrentCharacter(new_trace.cp_offset() -1,
new_trace.backtrack(),
false);
- // Newline means \n, \r, 0x2028 or 0x2029.
- if (!compiler->ascii()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
+ if (!assembler->CheckSpecialCharacterClass('n',
+ new_trace.backtrack())) {
+ // Newline means \n, \r, 0x2028 or 0x2029.
+ if (!compiler->ascii()) {
+ assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
+ }
+ assembler->CheckCharacter('\n', &ok);
+ assembler->CheckNotCharacter('\r', new_trace.backtrack());
}
- assembler->CheckCharacter('\n', &ok);
- assembler->CheckNotCharacter('\r', new_trace.backtrack());
assembler->Bind(&ok);
on_success->Emit(compiler, &new_trace);
}
+// Emit the code to handle \b and \B (word-boundary or non-word-boundary)
+// when we know whether the next character must be a word character or not.
+static void EmitHalfBoundaryCheck(AssertionNode::AssertionNodeType type,
+ RegExpCompiler* compiler,
+ RegExpNode* on_success,
+ Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Label done;
+
+ Trace new_trace(*trace);
+
+ bool expect_word_character = (type == AssertionNode::AFTER_WORD_CHARACTER);
+ Label* on_word = expect_word_character ? &done : new_trace.backtrack();
+ Label* on_non_word = expect_word_character ? new_trace.backtrack() : &done;
+
+ // Check whether previous character was a word character.
+ switch (trace->at_start()) {
+ case Trace::TRUE:
+ if (expect_word_character) {
+ assembler->GoTo(on_non_word);
+ }
+ break;
+ case Trace::UNKNOWN:
+ ASSERT_EQ(0, trace->cp_offset());
+ assembler->CheckAtStart(on_non_word);
+ // Fall through.
+ case Trace::FALSE:
+ int prev_char_offset = trace->cp_offset() - 1;
+ assembler->LoadCurrentCharacter(prev_char_offset, NULL, false, 1);
+ EmitWordCheck(assembler, on_word, on_non_word, expect_word_character);
+ // We may or may not have loaded the previous character.
+ new_trace.InvalidateCurrentCharacter();
+ }
+
+ assembler->Bind(&done);
+
+ on_success->Emit(compiler, &new_trace);
+}
+
+
// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
static void EmitBoundaryCheck(AssertionNode::AssertionNodeType type,
RegExpCompiler* compiler,
@@ -2205,10 +2229,15 @@
case AFTER_NEWLINE:
EmitHat(compiler, on_success(), trace);
return;
- case AT_NON_BOUNDARY:
case AT_BOUNDARY:
+ case AT_NON_BOUNDARY: {
EmitBoundaryCheck(type_, compiler, on_success(), trace);
return;
+ }
+ case AFTER_WORD_CHARACTER:
+ case AFTER_NONWORD_CHARACTER: {
+ EmitHalfBoundaryCheck(type_, compiler, on_success(), trace);
+ }
}
on_success()->Emit(compiler, trace);
}
@@ -2791,7 +2820,7 @@
// to generate probably can't use it.
if (i != first_normal_choice) {
alt_gen->expects_preload = false;
- new_trace.set_characters_preloaded(0);
+ new_trace.InvalidateCurrentCharacter();
}
if (i < choice_count - 1) {
new_trace.set_backtrack(&alt_gen->after);
@@ -3282,6 +3311,12 @@
case AssertionNode::AFTER_NEWLINE:
stream()->Add("label=\"(?<=\\n)\", shape=septagon");
break;
+ case AssertionNode::AFTER_WORD_CHARACTER:
+ stream()->Add("label=\"(?<=\\w)\", shape=septagon");
+ break;
+ case AssertionNode::AFTER_NONWORD_CHARACTER:
+ stream()->Add("label=\"(?<=\\W)\", shape=septagon");
+ break;
}
stream()->Add("];\n");
PrintAttributes(that);
@@ -3484,6 +3519,20 @@
set_.set_standard_set_type('.');
return true;
}
+ if (CompareRanges(set_.ranges(),
+ kLineTerminatorRanges,
+ kLineTerminatorRangeCount)) {
+ set_.set_standard_set_type('n');
+ return true;
+ }
+ if (CompareRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('w');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('W');
+ return true;
+ }
return false;
}
@@ -4010,6 +4059,101 @@
}
+bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
+ ASSERT_NOT_NULL(ranges);
+ int n = ranges->length();
+ if (n <= 1) return true;
+ int max = ranges->at(0).to();
+ for (int i = 1; i < n; i++) {
+ CharacterRange next_range = ranges->at(i);
+ if (next_range.from() <= max + 1) return false;
+ max = next_range.to();
+ }
+ return true;
+}
+
+SetRelation CharacterRange::WordCharacterRelation(
+ ZoneList<CharacterRange>* range) {
+ ASSERT(IsCanonical(range));
+ int i = 0; // Word character range index.
+ int j = 0; // Argument range index.
+ ASSERT_NE(0, kWordRangeCount);
+ SetRelation result;
+ if (range->length() == 0) {
+ result.SetElementsInSecondSet();
+ return result;
+ }
+ CharacterRange argument_range = range->at(0);
+ CharacterRange word_range = CharacterRange(kWordRanges[0], kWordRanges[1]);
+ while (i < kWordRangeCount && j < range->length()) {
+ // Check the two ranges for the five cases:
+ // - no overlap.
+    // - partial overlap (each range contains elements that aren't in the
+    //   other, and there are also elements that are in both).
+ // - argument range entirely inside word range.
+ // - word range entirely inside argument range.
+ // - ranges are completely equal.
+
+ // First check for no overlap. The earlier range is not in the other set.
+ if (argument_range.from() > word_range.to()) {
+ // Ranges are disjoint. The earlier word range contains elements that
+ // cannot be in the argument set.
+ result.SetElementsInSecondSet();
+ } else if (word_range.from() > argument_range.to()) {
+ // Ranges are disjoint. The earlier argument range contains elements that
+ // cannot be in the word set.
+ result.SetElementsInFirstSet();
+    } else if (word_range.from() <= argument_range.from() &&
+               word_range.to() >= argument_range.to()) {
+      result.SetElementsInBothSets();
+      // Argument range completely inside word range.
+      if (word_range.from() < argument_range.from() ||
+          word_range.to() > argument_range.to()) {
+        result.SetElementsInSecondSet();
+      }
+    } else if (word_range.from() >= argument_range.from() &&
+               word_range.to() <= argument_range.to()) {
+      result.SetElementsInBothSets();
+      result.SetElementsInFirstSet();
+    } else {
+      // There is overlap, and neither is a subrange of the other.
+ result.SetElementsInFirstSet();
+ result.SetElementsInSecondSet();
+ result.SetElementsInBothSets();
+ }
+ if (result.NonTrivialIntersection()) {
+ // The result is as (im)precise as we can possibly make it.
+ return result;
+ }
+ // Progress the range(s) with minimal to-character.
+ uc16 word_to = word_range.to();
+ uc16 argument_to = argument_range.to();
+ if (argument_to <= word_to) {
+ j++;
+ if (j < range->length()) {
+ argument_range = range->at(j);
+ }
+ }
+ if (word_to <= argument_to) {
+ i += 2;
+ if (i < kWordRangeCount) {
+ word_range = CharacterRange(kWordRanges[i], kWordRanges[i + 1]);
+ }
+ }
+ }
+ // Check if anything wasn't compared in the loop.
+ if (i < kWordRangeCount) {
+ // word range contains something not in argument range.
+ result.SetElementsInSecondSet();
+ } else if (j < range->length()) {
+ // Argument range contains something not in word range.
+ result.SetElementsInFirstSet();
+ }
+
+ return result;
+}
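
The SetRelation returned here (declared in the jsregexp.h hunk below) packs Venn-diagram occupancy into three bits, which the Disjoint/Contains/ContainedIn helpers simply mask. A minimal decoder; the bit values are assumed for illustration:

    #include <cstdio>

    // Mirrors the three SetRelation bits; the values here are illustrative.
    enum { kInFirst = 1, kInSecond = 2, kInBoth = 4 };

    static void Describe(int bits) {
      if ((bits & kInBoth) == 0) std::printf("disjoint\n");
      else if ((bits & (kInFirst | kInSecond)) == 0) std::printf("equal\n");
      else if ((bits & kInFirst) == 0) std::printf("contained in word set\n");
      else if ((bits & kInSecond) == 0) std::printf("contains word set\n");
      else std::printf("nontrivial intersection\n");
    }

    int main() {
      Describe(kInFirst | kInSecond);            // [%&] vs \w: disjoint.
      Describe(kInBoth | kInSecond);             // [a-z] vs \w: contained.
      Describe(kInFirst | kInSecond | kInBoth);  // [a-z%] vs \w: overlap.
    }
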
+
+
static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
int bottom,
int top) {
@@ -4119,6 +4263,287 @@
}
+// Move a number of elements in a zonelist to another position
+// in the same list. Handles overlapping source and target areas.
+static void MoveRanges(ZoneList<CharacterRange>* list,
+ int from,
+ int to,
+ int count) {
+ // Ranges are potentially overlapping.
+ if (from < to) {
+ for (int i = count - 1; i >= 0; i--) {
+ list->at(to + i) = list->at(from + i);
+ }
+ } else {
+ for (int i = 0; i < count; i++) {
+ list->at(to + i) = list->at(from + i);
+ }
+ }
+}
+
+
+static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
+ int count,
+ CharacterRange insert) {
+ // Inserts a range into list[0..count[, which must be sorted
+ // by from value and non-overlapping and non-adjacent, using at most
+ // list[0..count] for the result. Returns the number of resulting
+ // canonicalized ranges. Inserting a range may collapse existing ranges into
+ // fewer ranges, so the return value can be anything in the range 1..count+1.
+ uc16 from = insert.from();
+ uc16 to = insert.to();
+ int start_pos = 0;
+ int end_pos = count;
+ for (int i = count - 1; i >= 0; i--) {
+ CharacterRange current = list->at(i);
+ if (current.from() > to + 1) {
+ end_pos = i;
+ } else if (current.to() + 1 < from) {
+ start_pos = i + 1;
+ break;
+ }
+ }
+
+ // Inserted range overlaps, or is adjacent to, ranges at positions
+ // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
+ // not affected by the insertion.
+ // If start_pos == end_pos, the range must be inserted before start_pos.
+  // If start_pos < end_pos, the entire range from start_pos to end_pos
+ // must be merged with the insert range.
+
+ if (start_pos == end_pos) {
+ // Insert between existing ranges at position start_pos.
+ if (start_pos < count) {
+ MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
+ }
+ list->at(start_pos) = insert;
+ return count + 1;
+ }
+ if (start_pos + 1 == end_pos) {
+ // Replace single existing range at position start_pos.
+ CharacterRange to_replace = list->at(start_pos);
+ int new_from = Min(to_replace.from(), from);
+ int new_to = Max(to_replace.to(), to);
+ list->at(start_pos) = CharacterRange(new_from, new_to);
+ return count;
+ }
+ // Replace a number of existing ranges from start_pos to end_pos - 1.
+ // Move the remaining ranges down.
+
+ int new_from = Min(list->at(start_pos).from(), from);
+ int new_to = Max(list->at(end_pos - 1).to(), to);
+ if (end_pos < count) {
+ MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
+ }
+ list->at(start_pos) = CharacterRange(new_from, new_to);
+ return count - (end_pos - start_pos) + 1;
+}
+
+
+void CharacterSet::Canonicalize() {
+ // Special/default classes are always considered canonical. The result
+ // of calling ranges() will be sorted.
+ if (ranges_ == NULL) return;
+ CharacterRange::Canonicalize(ranges_);
+}
+
+
+void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
+ if (character_ranges->length() <= 1) return;
+ // Check whether ranges are already canonical (increasing, non-overlapping,
+ // non-adjacent).
+ int n = character_ranges->length();
+ int max = character_ranges->at(0).to();
+ int i = 1;
+ while (i < n) {
+ CharacterRange current = character_ranges->at(i);
+ if (current.from() <= max + 1) {
+ break;
+ }
+ max = current.to();
+ i++;
+ }
+ // Canonical until the i'th range. If that's all of them, we are done.
+ if (i == n) return;
+
+ // The ranges at index i and forward are not canonicalized. Make them so by
+ // doing the equivalent of insertion sort (inserting each into the previous
+ // list, in order).
+ // Notice that inserting a range can reduce the number of ranges in the
+ // result due to combining of adjacent and overlapping ranges.
+ int read = i; // Range to insert.
+ int num_canonical = i; // Length of canonicalized part of list.
+ do {
+ num_canonical = InsertRangeInCanonicalList(character_ranges,
+ num_canonical,
+ character_ranges->at(read));
+ read++;
+ } while (read < n);
+ character_ranges->Rewind(num_canonical);
+
+ ASSERT(CharacterRange::IsCanonical(character_ranges));
+}
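
Canonicalize keeps the already-sorted prefix and insertion-sorts the remaining ranges through InsertRangeInCanonicalList, merging anything overlapping or adjacent. A self-contained re-creation over plain vectors (sort-then-merge rather than the in-place insertion above) showing the end result:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Range { int from, to; };

    // Simplified canonicalization: sort by from, then merge overlapping or
    // adjacent ranges. Same result as the insertion-based code above.
    static std::vector<Range> Canonicalize(std::vector<Range> r) {
      std::sort(r.begin(), r.end(),
                [](const Range& a, const Range& b) { return a.from < b.from; });
      std::vector<Range> out;
      for (const Range& cur : r) {
        if (!out.empty() && cur.from <= out.back().to + 1) {
          out.back().to = std::max(out.back().to, cur.to);  // Merge.
        } else {
          out.push_back(cur);
        }
      }
      return out;
    }

    int main() {
      // {[1-2], [6-10], [15-20]} plus [5-7] collapses to {[1-2], [5-10], [15-20]}.
      std::vector<Range> ranges = {{1, 2}, {6, 10}, {15, 20}, {5, 7}};
      for (const Range& r : Canonicalize(ranges)) std::printf("[%d-%d] ", r.from, r.to);
      std::printf("\n");
    }
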
+
+
+// Utility function for CharacterRange::Merge. Adds a range at the end of
+// a canonicalized range list, if necessary merging the range with the last
+// range of the list.
+static void AddRangeToSet(ZoneList<CharacterRange>* set, CharacterRange range) {
+ if (set == NULL) return;
+ ASSERT(set->length() == 0 || set->at(set->length() - 1).to() < range.from());
+ int n = set->length();
+ if (n > 0) {
+    CharacterRange last_range = set->at(n - 1);
+    if (last_range.to() == range.from() - 1) {
+      set->at(n - 1) = CharacterRange(last_range.from(), range.to());
+ return;
+ }
+ }
+ set->Add(range);
+}
+
+
+static void AddRangeToSelectedSet(int selector,
+ ZoneList<CharacterRange>* first_set,
+ ZoneList<CharacterRange>* second_set,
+ ZoneList<CharacterRange>* intersection_set,
+ CharacterRange range) {
+ switch (selector) {
+ case kInsideFirst:
+ AddRangeToSet(first_set, range);
+ break;
+ case kInsideSecond:
+ AddRangeToSet(second_set, range);
+ break;
+ case kInsideBoth:
+ AddRangeToSet(intersection_set, range);
+ break;
+ }
+}
+
+
+
+void CharacterRange::Merge(ZoneList<CharacterRange>* first_set,
+ ZoneList<CharacterRange>* second_set,
+ ZoneList<CharacterRange>* first_set_only_out,
+ ZoneList<CharacterRange>* second_set_only_out,
+ ZoneList<CharacterRange>* both_sets_out) {
+ // Inputs are canonicalized.
+ ASSERT(CharacterRange::IsCanonical(first_set));
+ ASSERT(CharacterRange::IsCanonical(second_set));
+ // Outputs are empty, if applicable.
+ ASSERT(first_set_only_out == NULL || first_set_only_out->length() == 0);
+ ASSERT(second_set_only_out == NULL || second_set_only_out->length() == 0);
+ ASSERT(both_sets_out == NULL || both_sets_out->length() == 0);
+
+ // Merge sets by iterating through the lists in order of lowest "from" value,
+ // and putting intervals into one of three sets.
+
+ if (first_set->length() == 0) {
+ second_set_only_out->AddAll(*second_set);
+ return;
+ }
+ if (second_set->length() == 0) {
+ first_set_only_out->AddAll(*first_set);
+ return;
+ }
+ // Indices into input lists.
+ int i1 = 0;
+ int i2 = 0;
+ // Cache length of input lists.
+ int n1 = first_set->length();
+ int n2 = second_set->length();
+ // Current range. May be invalid if state is kInsideNone.
+ int from = 0;
+ int to = -1;
+ // Where current range comes from.
+ int state = kInsideNone;
+
+ while (i1 < n1 || i2 < n2) {
+ CharacterRange next_range;
+ int range_source;
+    if (i2 == n2 ||
+        (i1 < n1 && first_set->at(i1).from() < second_set->at(i2).from())) {
+ next_range = first_set->at(i1++);
+ range_source = kInsideFirst;
+ } else {
+ next_range = second_set->at(i2++);
+ range_source = kInsideSecond;
+ }
+ if (to < next_range.from()) {
+ // Ranges disjoint: |current| |next|
+ AddRangeToSelectedSet(state,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(from, to));
+ from = next_range.from();
+ to = next_range.to();
+ state = range_source;
+ } else {
+ if (from < next_range.from()) {
+ AddRangeToSelectedSet(state,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(from, next_range.from()-1));
+ }
+ if (to < next_range.to()) {
+ // Ranges overlap: |current|
+ // |next|
+ AddRangeToSelectedSet(state | range_source,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(next_range.from(), to));
+ from = to + 1;
+ to = next_range.to();
+ state = range_source;
+ } else {
+ // Range included: |current| , possibly ending at same character.
+ // |next|
+ AddRangeToSelectedSet(
+ state | range_source,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(next_range.from(), next_range.to()));
+ from = next_range.to() + 1;
+ // If ranges end at same character, both ranges are consumed completely.
+ if (next_range.to() == to) state = kInsideNone;
+ }
+ }
+ }
+ AddRangeToSelectedSet(state,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(from, to));
+}
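
Merge's output pointers may be NULL (drop those elements) or aliased (accumulate a union), which is how one routine serves as union, intersection, and difference; the contract is spelled out with the declaration in the jsregexp.h hunk below. A sketch of the three call shapes over plain integer sets rather than range lists:

    #include <cstdio>
    #include <set>

    using Set = std::set<int>;

    // Same output contract as CharacterRange::Merge, over plain integer sets:
    // a null output drops those elements; aliased outputs accumulate a union.
    static void Merge(const Set& first, const Set& second,
                      Set* first_only, Set* second_only, Set* both) {
      for (int e : first) {
        if (second.count(e)) {
          if (both != nullptr) both->insert(e);
        } else if (first_only != nullptr) {
          first_only->insert(e);
        }
      }
      for (int e : second) {
        if (!first.count(e) && second_only != nullptr) second_only->insert(e);
      }
    }

    static void Print(const char* tag, const Set& s) {
      std::printf("%s:", tag);
      for (int e : s) std::printf(" %d", e);
      std::printf("\n");
    }

    int main() {
      Set a = {1, 2, 3}, b = {3, 4};
      Set inter, diff, uni;
      Merge(a, b, nullptr, nullptr, &inter);  // Intersection: 3.
      Merge(a, b, &diff, nullptr, nullptr);   // First minus second: 1 2.
      Merge(a, b, &uni, &uni, &uni);          // All aliased: union 1 2 3 4.
      Print("intersection", inter);
      Print("difference", diff);
      Print("union", uni);
    }
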
+
+
+void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
+ ZoneList<CharacterRange>* negated_ranges) {
+ ASSERT(CharacterRange::IsCanonical(ranges));
+ ASSERT_EQ(0, negated_ranges->length());
+ int range_count = ranges->length();
+ uc16 from = 0;
+ int i = 0;
+ if (range_count > 0 && ranges->at(0).from() == 0) {
+ from = ranges->at(0).to();
+ i = 1;
+ }
+ while (i < range_count) {
+ CharacterRange range = ranges->at(i);
+ negated_ranges->Add(CharacterRange(from + 1, range.from() - 1));
+ from = range.to();
+ i++;
+ }
+ if (from < String::kMaxUC16CharCode) {
+ negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
+ }
+}
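
Negate emits the gaps between canonical ranges, with a special case for a range starting at character 0. The same computation over plain pairs, assuming the 16-bit character space used above:

    #include <cstdio>
    #include <vector>

    struct Range { int from, to; };
    static const int kMaxChar = 0xffff;  // String::kMaxUC16CharCode.

    // Gaps between canonical (sorted, disjoint, non-adjacent) ranges.
    static std::vector<Range> Negate(const std::vector<Range>& ranges) {
      std::vector<Range> out;
      int last_covered = -1;  // Last character known to be in the input set.
      for (const Range& r : ranges) {
        if (r.from > last_covered + 1) out.push_back({last_covered + 1, r.from - 1});
        last_covered = r.to;
      }
      if (last_covered < kMaxChar) out.push_back({last_covered + 1, kMaxChar});
      return out;
    }

    int main() {
      // Negating [a-z] yields [0x0000-0x0060] and [0x007b-0xffff].
      for (const Range& r : Negate({{'a', 'z'}})) {
        std::printf("[0x%04x-0x%04x]\n", r.from, r.to);
      }
    }
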
+
+
// -------------------------------------------------------------------
// Interest propagation
@@ -4410,9 +4835,203 @@
void Analysis::VisitAssertion(AssertionNode* that) {
EnsureAnalyzed(that->on_success());
+ AssertionNode::AssertionNodeType type = that->type();
+ if (type == AssertionNode::AT_BOUNDARY ||
+ type == AssertionNode::AT_NON_BOUNDARY) {
+ // Check if the following character is known to be a word character
+ // or known to not be a word character.
+ ZoneList<CharacterRange>* following_chars = that->FirstCharacterSet();
+
+ CharacterRange::Canonicalize(following_chars);
+
+ SetRelation word_relation =
+ CharacterRange::WordCharacterRelation(following_chars);
+ if (word_relation.ContainedIn()) {
+ // Following character is definitely a word character.
+ type = (type == AssertionNode::AT_BOUNDARY) ?
+ AssertionNode::AFTER_NONWORD_CHARACTER :
+ AssertionNode::AFTER_WORD_CHARACTER;
+ that->set_type(type);
+ } else if (word_relation.Disjoint()) {
+ // Following character is definitely *not* a word character.
+ type = (type == AssertionNode::AT_BOUNDARY) ?
+ AssertionNode::AFTER_WORD_CHARACTER :
+ AssertionNode::AFTER_NONWORD_CHARACTER;
+ that->set_type(type);
+ }
+ }
}
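
This is the payoff of the new half-boundary assertion types: when the set of possible next characters is known to lie entirely inside or entirely outside \w, the two-sided \b or \B test collapses to a check of the preceding character only. The rewrite rule in miniature; the Relation enum is an illustrative stand-in for the SetRelation query:

    #include <cstdio>

    enum AssertionType { AT_BOUNDARY, AT_NON_BOUNDARY,
                         AFTER_WORD_CHARACTER, AFTER_NONWORD_CHARACTER };
    // Illustrative summary of the SetRelation query on the first-character set.
    enum Relation { kContainedInWord, kDisjointFromWord, kMixed };

    // Mirrors the rewrite in Analysis::VisitAssertion above.
    static AssertionType Specialize(AssertionType type, Relation next_chars) {
      if (next_chars == kContainedInWord) {
        return type == AT_BOUNDARY ? AFTER_NONWORD_CHARACTER
                                   : AFTER_WORD_CHARACTER;
      }
      if (next_chars == kDisjointFromWord) {
        return type == AT_BOUNDARY ? AFTER_WORD_CHARACTER
                                   : AFTER_NONWORD_CHARACTER;
      }
      return type;  // Unknown: keep the full two-sided boundary check.
    }

    int main() {
      // In /\bfoo/ the next character 'f' is a word character, so \b only
      // needs the preceding character to be a non-word character.
      std::printf("%d\n", Specialize(AT_BOUNDARY, kContainedInWord) ==
                              AFTER_NONWORD_CHARACTER);  // 1
    }
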
+ZoneList<CharacterRange>* RegExpNode::FirstCharacterSet() {
+ if (first_character_set_ == NULL) {
+ if (ComputeFirstCharacterSet(kFirstCharBudget) < 0) {
+ // If we can't find an exact solution within the budget, we
+ // set the value to the set of every character, i.e., all characters
+ // are possible.
+ ZoneList<CharacterRange>* all_set = new ZoneList<CharacterRange>(1);
+ all_set->Add(CharacterRange::Everything());
+ first_character_set_ = all_set;
+ }
+ }
+ return first_character_set_;
+}
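
FirstCharacterSet caches its result and, when ComputeFirstCharacterSet exhausts the node-visit budget, falls back to the safe over-approximation of every character. The general memoize-with-budget shape, reduced to a toy node type:

    #include <cstdio>

    // Shape of the budgeted, cached analysis above: a negative return means
    // the budget ran out and nothing was cached.
    struct Node {
      Node* next;
      int cached;  // -1 means "not computed yet".

      int Compute(int budget) {
        budget--;
        if (budget >= 0 && next != nullptr) budget = next->Compute(budget);
        if (budget >= 0) cached = 42;  // Stand-in for the real set computation.
        return budget;
      }

      int Get() {
        if (cached < 0 && Compute(/*budget=*/10) < 0) {
          cached = 0;  // Safe over-approximation, like CharacterRange::Everything().
        }
        return cached;
      }
    };

    int main() {
      Node b = {nullptr, -1};
      Node a = {&b, -1};
      std::printf("%d\n", a.Get());  // 42: computed within budget and cached.
    }
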
+
+
+int RegExpNode::ComputeFirstCharacterSet(int budget) {
+ // Default behavior is to not be able to determine the first character.
+ return kComputeFirstCharacterSetFail;
+}
+
+
+int LoopChoiceNode::ComputeFirstCharacterSet(int budget) {
+ budget--;
+ if (budget >= 0) {
+ // Find loop min-iteration. It's the value of the guarded choice node
+ // with a GEQ guard, if any.
+ int min_repetition = 0;
+
+ for (int i = 0; i <= 1; i++) {
+ GuardedAlternative alternative = alternatives()->at(i);
+ ZoneList<Guard*>* guards = alternative.guards();
+ if (guards != NULL && guards->length() > 0) {
+ Guard* guard = guards->at(0);
+ if (guard->op() == Guard::GEQ) {
+ min_repetition = guard->value();
+ break;
+ }
+ }
+ }
+
+ budget = loop_node()->ComputeFirstCharacterSet(budget);
+ if (budget >= 0) {
+ ZoneList<CharacterRange>* character_set =
+ loop_node()->first_character_set();
+ if (body_can_be_zero_length() || min_repetition == 0) {
+ budget = continue_node()->ComputeFirstCharacterSet(budget);
+ if (budget < 0) return budget;
+ ZoneList<CharacterRange>* body_set =
+ continue_node()->first_character_set();
+ ZoneList<CharacterRange>* union_set =
+ new ZoneList<CharacterRange>(Max(character_set->length(),
+ body_set->length()));
+ CharacterRange::Merge(character_set,
+ body_set,
+ union_set,
+ union_set,
+ union_set);
+ character_set = union_set;
+ }
+ set_first_character_set(character_set);
+ }
+ }
+ return budget;
+}
+
+
+int NegativeLookaheadChoiceNode::ComputeFirstCharacterSet(int budget) {
+ budget--;
+ if (budget >= 0) {
+ GuardedAlternative successor = this->alternatives()->at(1);
+ RegExpNode* successor_node = successor.node();
+ budget = successor_node->ComputeFirstCharacterSet(budget);
+ if (budget >= 0) {
+ set_first_character_set(successor_node->first_character_set());
+ }
+ }
+ return budget;
+}
+
+
+// The first character set of an EndNode is unknowable. Just use the
+// default implementation that fails and returns all characters as possible.
+
+
+int AssertionNode::ComputeFirstCharacterSet(int budget) {
+ budget -= 1;
+ if (budget >= 0) {
+ switch (type_) {
+ case AT_END: {
+ set_first_character_set(new ZoneList<CharacterRange>(0));
+ break;
+ }
+ case AT_START:
+ case AT_BOUNDARY:
+ case AT_NON_BOUNDARY:
+ case AFTER_NEWLINE:
+ case AFTER_NONWORD_CHARACTER:
+ case AFTER_WORD_CHARACTER: {
+ ASSERT_NOT_NULL(on_success());
+ budget = on_success()->ComputeFirstCharacterSet(budget);
+ set_first_character_set(on_success()->first_character_set());
+ break;
+ }
+ }
+ }
+ return budget;
+}
+
+
+int ActionNode::ComputeFirstCharacterSet(int budget) {
+ if (type_ == POSITIVE_SUBMATCH_SUCCESS) return kComputeFirstCharacterSetFail;
+ budget--;
+ if (budget >= 0) {
+ ASSERT_NOT_NULL(on_success());
+ budget = on_success()->ComputeFirstCharacterSet(budget);
+ if (budget >= 0) {
+ set_first_character_set(on_success()->first_character_set());
+ }
+ }
+ return budget;
+}
+
+
+int BackReferenceNode::ComputeFirstCharacterSet(int budget) {
+ // We don't know anything about the first character of a backreference
+ // at this point.
+ return kComputeFirstCharacterSetFail;
+}
+
+
+int TextNode::ComputeFirstCharacterSet(int budget) {
+ budget--;
+ if (budget >= 0) {
+ ASSERT_NE(0, elements()->length());
+ TextElement text = elements()->at(0);
+ if (text.type == TextElement::ATOM) {
+ RegExpAtom* atom = text.data.u_atom;
+ ASSERT_NE(0, atom->length());
+ uc16 first_char = atom->data()[0];
+ ZoneList<CharacterRange>* range = new ZoneList<CharacterRange>(1);
+ range->Add(CharacterRange(first_char, first_char));
+ set_first_character_set(range);
+ } else {
+ ASSERT(text.type == TextElement::CHAR_CLASS);
+ RegExpCharacterClass* char_class = text.data.u_char_class;
+ if (char_class->is_negated()) {
+ ZoneList<CharacterRange>* ranges = char_class->ranges();
+ int length = ranges->length();
+ int new_length = length + 1;
+ if (length > 0) {
+ if (ranges->at(0).from() == 0) new_length--;
+ if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
+ new_length--;
+ }
+ }
+ ZoneList<CharacterRange>* negated_ranges =
+ new ZoneList<CharacterRange>(new_length);
+ CharacterRange::Negate(ranges, negated_ranges);
+ set_first_character_set(negated_ranges);
+ } else {
+ set_first_character_set(char_class->ranges());
+ }
+ }
+ }
+ return budget;
+}
+
+
+
// -------------------------------------------------------------------
// Dispatch table construction
@@ -4471,7 +5090,6 @@
}
-
static int CompareRangeByFrom(const CharacterRange* a,
const CharacterRange* b) {
return Compare<uc16>(a->from(), b->from());
@@ -4606,4 +5224,8 @@
pattern);
}
+
+int OffsetsVector::static_offsets_vector_[
+ OffsetsVector::kStaticOffsetsVectorSize];
+
}} // namespace v8::internal
diff --git a/src/jsregexp.h b/src/jsregexp.h
index b681119..b99a89e 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -75,13 +75,6 @@
int index,
Handle<JSArray> lastMatchInfo);
- // Call RegExp.prototyp.exec(string) in a loop.
- // Used by String.prototype.match and String.prototype.replace.
- // This function calls the garbage collector if necessary.
- static Handle<Object> ExecGlobal(Handle<JSRegExp> regexp,
- Handle<String> subject,
- Handle<JSArray> lastMatchInfo);
-
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpPrepare(Handle<JSRegExp> re,
Handle<String> pattern,
@@ -108,13 +101,23 @@
int index,
Handle<JSArray> lastMatchInfo);
- // Offsets in the lastMatchInfo array.
+  // Array indices in the lastMatchInfo array.
static const int kLastCaptureCount = 0;
static const int kLastSubject = 1;
static const int kLastInput = 2;
static const int kFirstCapture = 3;
static const int kLastMatchOverhead = 3;
+ // Direct offset into the lastMatchInfo array.
+ static const int kLastCaptureCountOffset =
+ FixedArray::kHeaderSize + kLastCaptureCount * kPointerSize;
+ static const int kLastSubjectOffset =
+ FixedArray::kHeaderSize + kLastSubject * kPointerSize;
+ static const int kLastInputOffset =
+ FixedArray::kHeaderSize + kLastInput * kPointerSize;
+ static const int kFirstCaptureOffset =
+ FixedArray::kHeaderSize + kFirstCapture * kPointerSize;
+
// Used to access the lastMatchInfo array.
static int GetCapture(FixedArray* array, int index) {
return Smi::cast(array->get(index + kFirstCapture))->value();
@@ -174,6 +177,57 @@
};
+// Represents the location of one element relative to the intersection of
+// two sets. Corresponds to the four areas of a Venn diagram.
+enum ElementInSetsRelation {
+ kInsideNone = 0,
+ kInsideFirst = 1,
+ kInsideSecond = 2,
+ kInsideBoth = 3
+};
+
+
+// Represents the relation of two sets.
+// Sets can be either disjoint, partially or fully overlapping, or equal.
+class SetRelation BASE_EMBEDDED {
+ public:
+  // The relation is represented by bits saying whether there are elements in
+  // one set that are not in the other, and a bit saying whether there are
+  // elements that are in both sets.
+
+ // Location of an element. Corresponds to the internal areas of
+ // a Venn diagram.
+ enum {
+ kInFirst = 1 << kInsideFirst,
+ kInSecond = 1 << kInsideSecond,
+ kInBoth = 1 << kInsideBoth
+ };
+ SetRelation() : bits_(0) {}
+ ~SetRelation() {}
+  // Record the existence of elements in a particular area of the
+  // Venn diagram.
+ void SetElementsInFirstSet() { bits_ |= kInFirst; }
+ void SetElementsInSecondSet() { bits_ |= kInSecond; }
+ void SetElementsInBothSets() { bits_ |= kInBoth; }
+ // Check the currently known relation of the sets (common functions only,
+ // for other combinations, use value() to get the bits and check them
+ // manually).
+ // Sets are completely disjoint.
+ bool Disjoint() { return (bits_ & kInBoth) == 0; }
+ // Sets are equal.
+ bool Equals() { return (bits_ & (kInFirst | kInSecond)) == 0; }
+ // First set contains second.
+ bool Contains() { return (bits_ & kInSecond) == 0; }
+ // Second set contains first.
+ bool ContainedIn() { return (bits_ & kInFirst) == 0; }
+ bool NonTrivialIntersection() {
+ return (bits_ == (kInFirst | kInSecond | kInBoth));
+ }
+ int value() { return bits_; }
+ private:
+ int bits_;
+};
+
+
class CharacterRange {
public:
CharacterRange() : from_(0), to_(0) { }
@@ -205,7 +259,39 @@
Vector<const uc16> overlay,
ZoneList<CharacterRange>** included,
ZoneList<CharacterRange>** excluded);
-
+  // Whether a range list is in canonical form: ranges ordered by from value,
+ // and ranges non-overlapping and non-adjacent.
+ static bool IsCanonical(ZoneList<CharacterRange>* ranges);
+ // Convert range list to canonical form. The characters covered by the ranges
+ // will still be the same, but no character is in more than one range, and
+ // adjacent ranges are merged. The resulting list may be shorter than the
+ // original, but cannot be longer.
+ static void Canonicalize(ZoneList<CharacterRange>* ranges);
+ // Check how the set of characters defined by a CharacterRange list relates
+ // to the set of word characters. List must be in canonical form.
+ static SetRelation WordCharacterRelation(ZoneList<CharacterRange>* ranges);
+ // Takes two character range lists (representing character sets) in canonical
+ // form and merges them.
+ // The characters that are only covered by the first set are added to
+  // first_set_only_out. The characters that are only in the second set are
+ // added to second_set_only_out, and the characters that are in both are
+ // added to both_sets_out.
+ // The pointers to first_set_only_out, second_set_only_out and both_sets_out
+ // should be to empty lists, but they need not be distinct, and may be NULL.
+ // If NULL, the characters are dropped, and if two arguments are the same
+ // pointer, the result is the union of the two sets that would be created
+ // if the pointers had been distinct.
+ // This way, the Merge function can compute all the usual set operations:
+ // union (all three out-sets are equal), intersection (only both_sets_out is
+ // non-NULL), and set difference (only first_set is non-NULL).
+ static void Merge(ZoneList<CharacterRange>* first_set,
+ ZoneList<CharacterRange>* second_set,
+ ZoneList<CharacterRange>* first_set_only_out,
+ ZoneList<CharacterRange>* second_set_only_out,
+ ZoneList<CharacterRange>* both_sets_out);
+ // Negate the contents of a character range in canonical form.
+ static void Negate(ZoneList<CharacterRange>* src,
+ ZoneList<CharacterRange>* dst);
static const int kRangeCanonicalizeMax = 0x346;
static const int kStartMarker = (1 << 24);
static const int kPayloadMask = (1 << 24) - 1;
@@ -479,7 +565,7 @@
class RegExpNode: public ZoneObject {
public:
- RegExpNode() : trace_count_(0) { }
+ RegExpNode() : first_character_set_(NULL), trace_count_(0) { }
virtual ~RegExpNode();
virtual void Accept(NodeVisitor* visitor) = 0;
// Generates a goto to this node or actually generates the code at this point.
@@ -530,8 +616,29 @@
SiblingList* siblings() { return &siblings_; }
void set_siblings(SiblingList* other) { siblings_ = *other; }
+ // Return the set of possible next characters recognized by the regexp
+ // (or a safe subset, potentially the set of all characters).
+ ZoneList<CharacterRange>* FirstCharacterSet();
+
+ // Compute (if possible within the budget of traversed nodes) the
+ // possible first characters of the input matched by this node and
+ // its continuation. Returns the remaining budget after the computation.
+ // If the budget is spent, the result is negative, and the cached
+ // first_character_set_ value isn't set.
+ virtual int ComputeFirstCharacterSet(int budget);
+
+ // Get and set the cached first character set value.
+ ZoneList<CharacterRange>* first_character_set() {
+ return first_character_set_;
+ }
+ void set_first_character_set(ZoneList<CharacterRange>* character_set) {
+ first_character_set_ = character_set;
+ }
+
protected:
enum LimitResult { DONE, CONTINUE };
+ static const int kComputeFirstCharacterSetFail = -1;
+
LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
// Returns a sibling of this node whose interests and assumptions
@@ -552,9 +659,11 @@
virtual RegExpNode* Clone() = 0;
private:
+ static const int kFirstCharBudget = 10;
Label label_;
NodeInfo info_;
SiblingList siblings_;
+ ZoneList<CharacterRange>* first_character_set_;
// This variable keeps track of how many times code has been generated for
// this node (in different traces). We don't keep track of where the
// generated code is located unless the code is generated at the start of
@@ -645,7 +754,7 @@
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
virtual ActionNode* Clone() { return new ActionNode(*this); }
-
+ virtual int ComputeFirstCharacterSet(int budget);
private:
union {
struct {
@@ -711,7 +820,7 @@
return result;
}
void CalculateOffsets();
-
+ virtual int ComputeFirstCharacterSet(int budget);
private:
enum TextEmitPassType {
NON_ASCII_MATCH, // Check for characters that can't match.
@@ -741,7 +850,12 @@
AT_START,
AT_BOUNDARY,
AT_NON_BOUNDARY,
- AFTER_NEWLINE
+ AFTER_NEWLINE,
+ // Types not directly expressible in regexp syntax.
+ // Used for modifying a boundary node if its following character is
+ // known to be word and/or non-word.
+ AFTER_NONWORD_CHARACTER,
+ AFTER_WORD_CHARACTER
};
static AssertionNode* AtEnd(RegExpNode* on_success) {
return new AssertionNode(AT_END, on_success);
@@ -765,8 +879,10 @@
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
+ virtual int ComputeFirstCharacterSet(int budget);
virtual AssertionNode* Clone() { return new AssertionNode(*this); }
AssertionNodeType type() { return type_; }
+ void set_type(AssertionNodeType type) { type_ = type; }
private:
AssertionNode(AssertionNodeType t, RegExpNode* on_success)
: SeqRegExpNode(on_success), type_(t) { }
@@ -794,7 +910,7 @@
return;
}
virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
-
+ virtual int ComputeFirstCharacterSet(int budget);
private:
int start_reg_;
int end_reg_;
@@ -816,7 +932,6 @@
UNREACHABLE();
}
virtual EndNode* Clone() { return new EndNode(*this); }
-
private:
Action action_;
};
@@ -950,6 +1065,7 @@
// characters, but on a negative lookahead the negative branch did not take
// part in that calculation (EatsAtLeast) so the assumptions don't hold.
virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
+ virtual int ComputeFirstCharacterSet(int budget);
};
@@ -968,6 +1084,7 @@
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
+ virtual int ComputeFirstCharacterSet(int budget);
virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
@@ -1123,7 +1240,7 @@
void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
void set_stop_node(RegExpNode* node) { stop_node_ = node; }
void set_loop_label(Label* label) { loop_label_ = label; }
- void set_characters_preloaded(int cpre) { characters_preloaded_ = cpre; }
+ void set_characters_preloaded(int count) { characters_preloaded_ = count; }
void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
void set_flush_budget(int to) { flush_budget_ = to; }
void set_quick_check_performed(QuickCheckDetails* d) {
@@ -1283,6 +1400,40 @@
};
+class OffsetsVector {
+ public:
+ inline OffsetsVector(int num_registers)
+ : offsets_vector_length_(num_registers) {
+ if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ vector_ = NewArray<int>(offsets_vector_length_);
+ } else {
+ vector_ = static_offsets_vector_;
+ }
+ }
+ inline ~OffsetsVector() {
+ if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ DeleteArray(vector_);
+ vector_ = NULL;
+ }
+ }
+ inline int* vector() { return vector_; }
+ inline int length() { return offsets_vector_length_; }
+
+ static const int kStaticOffsetsVectorSize = 50;
+
+ private:
+ static Address static_offsets_vector_address() {
+ return reinterpret_cast<Address>(&static_offsets_vector_);
+ }
+
+ int* vector_;
+ int offsets_vector_length_;
+ static int static_offsets_vector_[kStaticOffsetsVectorSize];
+
+ friend class ExternalReference;
+};
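
OffsetsVector, moved here from jsregexp.cc so ExternalReference can reach its static buffer, is a small-buffer optimization: requests up to kStaticOffsetsVectorSize share one static array, larger ones go to the heap. The same pattern standalone; the shared static buffer assumes single-threaded use, as in V8's regexp execution:

    #include <cstdio>

    class OffsetsBuffer {
     public:
      explicit OffsetsBuffer(int length) : length_(length) {
        vector_ = (length_ > kStaticSize) ? new int[length_] : static_vector_;
      }
      ~OffsetsBuffer() {
        if (length_ > kStaticSize) delete[] vector_;
      }
      int* vector() { return vector_; }
      int length() const { return length_; }

      static const int kStaticSize = 50;

     private:
      int* vector_;
      int length_;
      static int static_vector_[kStaticSize];
    };

    int OffsetsBuffer::static_vector_[OffsetsBuffer::kStaticSize];

    int main() {
      OffsetsBuffer small(8);    // Served from the shared static array.
      OffsetsBuffer large(200);  // Heap-allocated, freed by the destructor.
      small.vector()[0] = 1;
      large.vector()[199] = 2;
      std::printf("%d %d\n", small.vector()[0], large.vector()[199]);
    }
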
+
+
} } // namespace v8::internal
#endif // V8_JSREGEXP_H_
diff --git a/src/jump-target.h b/src/jump-target.h
index 0933ee7..dd291c6 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -112,7 +112,8 @@
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
- // code after the branch.
+ // code after the branch. The arg is a result that is live both at
+ // the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
diff --git a/src/log.cc b/src/log.cc
index bbce926..98dd562 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -871,6 +871,23 @@
}
+void Logger::SnapshotPositionEvent(Address addr, int pos) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
+ LogMessageBuilder msg;
+ msg.Append("%s,", log_events_[SNAPSHOT_POSITION_EVENT]);
+ msg.AppendAddress(addr);
+ msg.Append(",%d", pos);
+ if (FLAG_compress_log) {
+ ASSERT(compression_helper_ != NULL);
+ if (!compression_helper_->HandleMessage(&msg)) return;
+ }
+ msg.Append('\n');
+ msg.WriteToLogFile();
+#endif
+}
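
The new event reuses the established log-line shape: long event name, address, payload, then an optional compression pass. Roughly the line the Append calls above build, sketched with snprintf (the real logger can abbreviate repeated addresses, which this does not):

    #include <cstdio>

    int main() {
      const char* event = "snapshot-pos";  // Long name of SNAPSHOT_POSITION_EVENT.
      void* addr = reinterpret_cast<void*>(0x2ae80000);
      int pos = 12345;
      char line[64];
      std::snprintf(line, sizeof(line), "%s,%p,%d\n", event, addr, pos);
      std::fputs(line, stdout);
    }
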
+
+
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log) return;
diff --git a/src/log.h b/src/log.h
index 4d5acce..e21df03 100644
--- a/src/log.h
+++ b/src/log.h
@@ -116,6 +116,7 @@
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
+ V(SNAPSHOT_POSITION_EVENT, "snapshot-pos", "sp") \
V(TICK_EVENT, "tick", "t") \
V(REPEAT_META_EVENT, "repeat", "r") \
V(BUILTIN_TAG, "Builtin", "bi") \
@@ -224,6 +225,8 @@
// Emits a code delete event.
static void CodeDeleteEvent(Address from);
+ static void SnapshotPositionEvent(Address addr, int pos);
+
// ==== Events logged by --log-gc. ====
// Heap sampling events: start, end, and individual types.
static void HeapSampleBeginEvent(const char* space, const char* kind);
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 63a6d6e..0fe4328 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -77,8 +77,13 @@
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
+#ifdef V8_ARM_VARIANT_THUMB
+#include "arm/assembler-thumb2.h"
+#include "arm/assembler-thumb2-inl.h"
+#else
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
+#endif
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
#else
diff --git a/src/macros.py b/src/macros.py
index 5b06099..1e436a0 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -92,12 +92,13 @@
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro FLOOR(arg) = %Math_floor(arg);
+macro FLOOR(arg) = $floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
-macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInt32(arg));
+macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
+macro TO_UINT32(arg) = (arg >>> 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
@@ -117,6 +118,14 @@
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
+macro DAY(time) = ($floor(time / 86400000));
+macro MONTH_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).month);
+macro DATE_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).date);
+macro YEAR_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).year);
+macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
+macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
+macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
+macro MS_FROM_TIME(time) = (Modulo(time, 1000));
# Last input and last subject of regexp matches.
macro LAST_SUBJECT(array) = ((array)[1]);
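
The date macros above reduce each field to integer arithmetic: milliseconds become days since the Unix epoch, and days + 2440588 (the Julian day number of 1970-01-01) feed FromJulianDay. One well-known way to do that last conversion is Fliegel and Van Flandern's integer algorithm, sketched here independently of the runtime's FromJulianDay:

    #include <cstdio>

    // Civil date from a Julian day number (Fliegel & Van Flandern, 1968),
    // standing in for the runtime's FromJulianDay. 2440588 is the Julian day
    // number of 1970-01-01, the constant used in the macros above.
    static void FromJulianDay(long jd, int* year, int* month, int* day) {
      long l = jd + 68569;
      long n = 4 * l / 146097;
      l = l - (146097 * n + 3) / 4;
      long i = 4000 * (l + 1) / 1461001;
      l = l - 1461 * i / 4 + 31;
      long j = 80 * l / 2447;
      *day = static_cast<int>(l - 2447 * j / 80);
      l = j / 11;
      *month = static_cast<int>(j + 2 - 12 * l);
      *year = static_cast<int>(100 * (n - 49) + i + l);
    }

    int main() {
      double time_ms = 0;  // The epoch itself.
      long days = static_cast<long>(time_ms / 86400000);  // DAY(time).
      int y, m, d;
      FromJulianDay(days + 2440588, &y, &m, &d);
      std::printf("%04d-%02d-%02d\n", y, m, d);  // 1970-01-01
    }
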
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 81819b7..e284b42 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -116,6 +116,8 @@
compact_on_next_gc_ = false;
if (FLAG_never_compact) compacting_collection_ = false;
+ if (!Heap::map_space()->MapPointersEncodable())
+ compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef DEBUG
@@ -155,6 +157,8 @@
// objects (empty string, illegal builtin).
StubCache::Clear();
+ ExternalStringTable::CleanUp();
+
// If we've just compacted old space there's no reason to check the
// fragmentation limit. Just return.
if (HasCompacted()) return;
@@ -369,41 +373,18 @@
class SymbolTableCleaner : public ObjectVisitor {
public:
SymbolTableCleaner() : pointers_removed_(0) { }
- void VisitPointers(Object** start, Object** end) {
+
+ virtual void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
// Check if the symbol being pruned is an external symbol. We need to
// delete the associated external data as this symbol is going away.
- // Since the object is not marked we can access its map word safely
- // without having to worry about marking bits in the object header.
- Map* map = HeapObject::cast(*p)->map();
// Since no objects have yet been moved we can safely access the map of
// the object.
- uint32_t type = map->instance_type();
- bool is_external = (type & kStringRepresentationMask) ==
- kExternalStringTag;
- if (is_external) {
- bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
- byte* resource_addr = reinterpret_cast<byte*>(*p) +
- ExternalString::kResourceOffset -
- kHeapObjectTag;
- if (is_two_byte) {
- v8::String::ExternalStringResource** resource =
- reinterpret_cast<v8::String::ExternalStringResource**>
- (resource_addr);
- delete *resource;
- // Clear the resource pointer in the symbol.
- *resource = NULL;
- } else {
- v8::String::ExternalAsciiStringResource** resource =
- reinterpret_cast<v8::String::ExternalAsciiStringResource**>
- (resource_addr);
- delete *resource;
- // Clear the resource pointer in the symbol.
- *resource = NULL;
- }
+ if ((*p)->IsExternalString()) {
+ Heap::FinalizeExternalString(String::cast(*p));
}
// Set the entry to null_value (as deleted).
*p = Heap::raw_unchecked_null_value();
@@ -546,34 +527,7 @@
}
-class SymbolMarkingVisitor : public ObjectVisitor {
- public:
- void VisitPointers(Object** start, Object** end) {
- MarkingVisitor marker;
- for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
-
- HeapObject* object = HeapObject::cast(*p);
- // If the object is marked, we have marked or are in the process
- // of marking subparts.
- if (object->IsMarked()) continue;
-
- // The object is unmarked, we do not need to unmark to use its
- // map.
- Map* map = object->map();
- object->IterateBody(map->instance_type(),
- object->SizeFromMap(map),
- &marker);
- }
- }
-};
-
-
void MarkCompactCollector::MarkSymbolTable() {
- // Objects reachable from symbols are marked as live so as to ensure
- // that if the symbol itself remains alive after GC for any reason,
- // and if it is a cons string backed by an external string (even indirectly),
- // then the external string does not receive a weak reference callback.
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
@@ -581,11 +535,6 @@
MarkingVisitor marker;
symbol_table->IteratePrefix(&marker);
ProcessMarkingStack(&marker);
- // Mark subparts of the symbols but not the symbols themselves
- // (unless reachable from another symbol).
- SymbolMarkingVisitor symbol_marker;
- symbol_table->IterateElements(&symbol_marker);
- ProcessMarkingStack(&marker);
}
@@ -774,6 +723,8 @@
SymbolTableCleaner v;
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
+ ExternalStringTable::Iterate(&v);
+ ExternalStringTable::CleanUp();
// Remove object groups after marking phase.
GlobalHandles::RemoveObjectGroups();
@@ -840,7 +791,7 @@
// back pointers, reversing them all at once. This allows us to find
// those maps with map transitions that need to be nulled, and only
// scan the descriptor arrays of those maps, not all maps.
- // All of these actions are carried out only on maps of JSObects
+ // All of these actions are carried out only on maps of JSObjects
// and related subtypes.
while (map_iterator.has_next()) {
Map* map = reinterpret_cast<Map*>(map_iterator.next());
@@ -887,11 +838,8 @@
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
-// 31 21 20 10 9 0
-// +-----------------+------------------+-----------------+
-// |forwarding offset|page offset of map|page index of map|
-// +-----------------+------------------+-----------------+
-// 11 bits 11 bits 10 bits
+// The exact encoding is described in the comments for class MapWord in
+// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
@@ -1220,7 +1168,7 @@
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes) {
- // Objects in map space are frequently assumed to have size Map::kSize and a
+ // Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
@@ -1294,6 +1242,225 @@
}
+class MapIterator : public HeapObjectIterator {
+ public:
+ MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
+
+ explicit MapIterator(Address start)
+ : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
+
+ private:
+ static int SizeCallback(HeapObject* unused) {
+ USE(unused);
+ return Map::kSize;
+ }
+};
+
+
+class MapCompact {
+ public:
+ explicit MapCompact(int live_maps)
+ : live_maps_(live_maps),
+ to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
+ map_to_evacuate_it_(to_evacuate_start_),
+ first_map_to_evacuate_(
+ reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
+ }
+
+ void CompactMaps() {
+ // As we know the number of maps to evacuate beforehand,
+    // we stop when there are no more vacant maps.
+ for (Map* next_vacant_map = NextVacantMap();
+ next_vacant_map;
+ next_vacant_map = NextVacantMap()) {
+ EvacuateMap(next_vacant_map, NextMapToEvacuate());
+ }
+
+#ifdef DEBUG
+ CheckNoMapsToEvacuate();
+#endif
+ }
+
+ void UpdateMapPointersInRoots() {
+ Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
+ GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
+ }
+
+ void FinishMapSpace() {
+ // Iterate through to space and finish move.
+ MapIterator it;
+ HeapObject* o = it.next();
+ for (; o != first_map_to_evacuate_; o = it.next()) {
+ Map* map = reinterpret_cast<Map*>(o);
+ ASSERT(!map->IsMarked());
+ ASSERT(!map->IsOverflowed());
+ ASSERT(map->IsMap());
+ Heap::UpdateRSet(map);
+ }
+ }
+
+ void UpdateMapPointersInPagedSpace(PagedSpace* space) {
+ ASSERT(space != Heap::map_space());
+
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ Page* p = it.next();
+ UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
+ }
+ }
+
+ void UpdateMapPointersInNewSpace() {
+ NewSpace* space = Heap::new_space();
+ UpdateMapPointersInRange(space->bottom(), space->top());
+ }
+
+ void UpdateMapPointersInLargeObjectSpace() {
+ LargeObjectIterator it(Heap::lo_space());
+ while (true) {
+ if (!it.has_next()) break;
+ UpdateMapPointersInObject(it.next());
+ }
+ }
+
+ void Finish() {
+ Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+ }
+
+ private:
+ int live_maps_;
+ Address to_evacuate_start_;
+ MapIterator vacant_map_it_;
+ MapIterator map_to_evacuate_it_;
+ Map* first_map_to_evacuate_;
+
+ // Helper class for updating map pointers in HeapObjects.
+ class MapUpdatingVisitor: public ObjectVisitor {
+ public:
+ void VisitPointer(Object** p) {
+ UpdateMapPointer(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) UpdateMapPointer(p);
+ }
+
+ private:
+ void UpdateMapPointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+ HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
+
+      // Moved maps are tagged with an overflowed map word. They are the only
+      // objects whose map word is overflowed, as marking is already complete.
+ MapWord map_word = old_map->map_word();
+ if (!map_word.IsOverflowed()) return;
+
+ *p = GetForwardedMap(map_word);
+ }
+ };
+
+ static MapUpdatingVisitor map_updating_visitor_;
+
+ static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
+ while (true) {
+ ASSERT(it->has_next());
+ HeapObject* next = it->next();
+ if (next == last)
+ return NULL;
+ ASSERT(!next->IsOverflowed());
+ ASSERT(!next->IsMarked());
+ ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
+ if (next->IsMap() == live)
+ return reinterpret_cast<Map*>(next);
+ }
+ }
+
+ Map* NextVacantMap() {
+ Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
+ ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
+ return map;
+ }
+
+ Map* NextMapToEvacuate() {
+ Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
+ ASSERT(map != NULL);
+ ASSERT(map->IsMap());
+ return map;
+ }
+
+ static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
+ ASSERT(FreeListNode::IsFreeListNode(vacant_map));
+ ASSERT(map_to_evacuate->IsMap());
+
+ memcpy(
+ reinterpret_cast<void*>(vacant_map->address()),
+ reinterpret_cast<void*>(map_to_evacuate->address()),
+ Map::kSize);
+ ASSERT(vacant_map->IsMap()); // Due to memcpy above.
+
+ MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
+ forwarding_map_word.SetOverflow();
+ map_to_evacuate->set_map_word(forwarding_map_word);
+
+ ASSERT(map_to_evacuate->map_word().IsOverflowed());
+ ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
+ }
+
+ static Map* GetForwardedMap(MapWord map_word) {
+ ASSERT(map_word.IsOverflowed());
+ map_word.ClearOverflow();
+ Map* new_map = map_word.ToMap();
+ ASSERT_MAP_ALIGNED(new_map->address());
+ return new_map;
+ }
+
+ static int UpdateMapPointersInObject(HeapObject* obj) {
+ ASSERT(!obj->IsMarked());
+ Map* map = obj->map();
+ ASSERT(Heap::map_space()->Contains(map));
+ MapWord map_word = map->map_word();
+ ASSERT(!map_word.IsMarked());
+ if (map_word.IsOverflowed()) {
+ Map* new_map = GetForwardedMap(map_word);
+ ASSERT(Heap::map_space()->Contains(new_map));
+ obj->set_map(new_map);
+
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("update %p : %p -> %p\n", obj->address(),
+ map, new_map);
+ }
+#endif
+ }
+
+ int size = obj->SizeFromMap(map);
+ obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
+ return size;
+ }
+
+ static void UpdateMapPointersInRange(Address start, Address end) {
+ HeapObject* object;
+ int size;
+ for (Address current = start; current < end; current += size) {
+ object = HeapObject::FromAddress(current);
+ size = UpdateMapPointersInObject(object);
+ ASSERT(size > 0);
+ }
+ }
+
+#ifdef DEBUG
+ void CheckNoMapsToEvacuate() {
+ if (!FLAG_enable_slow_asserts)
+ return;
+
+ while (map_to_evacuate_it_.has_next())
+ ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
+ }
+#endif
+};
+
+MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
+
+
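The MapCompact machinery above moves live maps into vacant free-list slots and leaves a tagged forwarding pointer behind in the old copy's map word. The following is a minimal, self-contained C++ sketch of that protocol; the struct, tag bit, and function names are illustrative stand-ins, not V8's types.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    namespace sketch {

    const uintptr_t kOverflowBit = 1;  // tag distinguishing forwarding words

    struct FakeMap {
      uintptr_t map_word;  // normally a map pointer; becomes forwarding info
      char payload[24];    // remainder of the fixed-size map body
    };

    // Copy the map body into the vacant slot, then tag the old copy so later
    // pointer updates (cf. MapUpdatingVisitor) can recognize and chase it.
    void Evacuate(FakeMap* vacant, FakeMap* evacuee) {
      std::memcpy(vacant, evacuee, sizeof(FakeMap));
      evacuee->map_word = reinterpret_cast<uintptr_t>(vacant) | kOverflowBit;
    }

    FakeMap* GetForwarded(const FakeMap* evacuee) {
      assert(evacuee->map_word & kOverflowBit);
      return reinterpret_cast<FakeMap*>(evacuee->map_word & ~kOverflowBit);
    }

    }  // namespace sketch
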
void MarkCompactCollector::SweepSpaces() {
ASSERT(state_ == SWEEP_SPACES);
ASSERT(!IsCompacting());
@@ -1308,6 +1475,26 @@
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+ int live_maps = Heap::map_space()->Size() / Map::kSize;
+ ASSERT(live_map_objects_ == live_maps);
+
+ if (Heap::map_space()->NeedsCompaction(live_maps)) {
+ MapCompact map_compact(live_maps);
+
+ map_compact.CompactMaps();
+ map_compact.UpdateMapPointersInRoots();
+
+ map_compact.FinishMapSpace();
+ PagedSpaces spaces;
+ while (PagedSpace* space = spaces.next()) {
+ if (space == Heap::map_space()) continue;
+ map_compact.UpdateMapPointersInPagedSpace(space);
+ }
+ map_compact.UpdateMapPointersInNewSpace();
+ map_compact.UpdateMapPointersInLargeObjectSpace();
+
+ map_compact.Finish();
+ }
}
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 2da2b1f..02aedb3 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -92,7 +92,15 @@
static bool HasCompacted() { return compacting_collection_; }
// True after the Prepare phase if the compaction is taking place.
- static bool IsCompacting() { return compacting_collection_; }
+ static bool IsCompacting() {
+#ifdef DEBUG
+ // For the purposes of asserts we don't want this to keep returning true
+ // after the collection is completed.
+ return state_ != IDLE && compacting_collection_;
+#else
+ return compacting_collection_;
+#endif
+ }
// The count of the number of objects left marked at the end of the last
// completed full GC (expected to be zero).
diff --git a/src/math.js b/src/math.js
index 7191896..d804648 100644
--- a/src/math.js
+++ b/src/math.js
@@ -84,7 +84,7 @@
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %_Math_cos(x);
+ return %Math_cos(x);
}
// ECMA 262 - 15.8.2.8
@@ -98,12 +98,12 @@
if (!IS_NUMBER(x)) x = ToNumber(x);
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
- if (x <= 0x7FFFFFFF && x > 0) {
+ if (x < 0x80000000 && x > 0) {
// Numbers in the range [0, 2^31) can be floored by converting
// them to an unsigned 32-bit value using the shift operator.
// We avoid doing so for -0, because the result of Math.floor(-0)
// has to be -0, which wouldn't be the case with the shift.
- return x << 0;
+ return TO_UINT32(x);
} else {
return %Math_floor(x);
}
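
The fast path above works because positive doubles below 2^31 truncate to their floor when converted to an unsigned 32-bit integer, and the x > 0 guard keeps -0 (whose floor must stay -0) on the slow path. A hedged C++ analogue of the same check, my own example rather than V8 code:

    #include <cmath>
    #include <cstdint>

    double FastFloor(double x) {
      if (x > 0 && x < 2147483648.0) {    // (0, 2^31): truncation equals floor
        return static_cast<uint32_t>(x);  // cheap integer conversion
      }
      return std::floor(x);               // slow path: negatives, -0, big values
    }
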
@@ -128,8 +128,9 @@
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = ToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
- // Make sure +0 is considered greater than -0.
- if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
+ // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
+ // a Smi or heap number.
+ if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
}
return r;
}
@@ -147,8 +148,9 @@
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = ToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
- // Make sure -0 is considered less than +0.
- if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
+ // Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
+ // Smi or a heap number.
+ if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
}
return r;
}
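
The added 1 / r < 0 (respectively 1 / n < 0) term distinguishes the two zeros: they compare equal, but dividing one by them yields infinities of opposite sign. The same IEEE-754 behavior can be verified in a small standalone C++ program (my own example):

    #include <cassert>

    bool IsMinusZero(double x) {
      // +0 == -0, so the sign is recovered from the sign of 1/x:
      // 1.0 / +0.0 is +infinity, 1.0 / -0.0 is -infinity.
      return x == 0.0 && 1.0 / x < 0.0;
    }

    int main() {
      assert(IsMinusZero(-0.0));
      assert(!IsMinusZero(0.0));
      assert(!IsMinusZero(-1.0));
    }
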
@@ -174,7 +176,7 @@
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %_Math_sin(x);
+ return %Math_sin(x);
}
// ECMA 262 - 15.8.2.17
diff --git a/src/messages.js b/src/messages.js
index 1e5053d..df008c9 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -157,6 +157,11 @@
instanceof_nonobject_proto: "Function has non-object prototype '%0' in instanceof check",
null_to_object: "Cannot convert null to object",
reduce_no_initial: "Reduce of empty array with no initial value",
+ getter_must_be_callable: "Getter must be a function: %0",
+ setter_must_be_callable: "Setter must be a function: %0",
+ value_and_accessor: "Invalid property. A property cannot both have accessors and be writable or have a value: %0",
+ proto_object_or_null: "Object prototype may only be an Object or null",
+ property_desc_object: "Property description must be an object: %0",
// RangeError
invalid_array_length: "Invalid array length",
stack_overflow: "Maximum call stack size exceeded",
@@ -173,7 +178,8 @@
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
- object_keys_non_object: "Object.keys called on non-object"
+ obj_ctor_property_non_object: "Object.%0 called on non-object",
+ array_indexof_not_defined: "Array.getIndexOf: Argument undefined"
};
}
var format = kMessages[message.type];
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index ba663b2..0269f1f 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -600,14 +600,14 @@
ObjectMirror.prototype.hasNamedInterceptor = function() {
// Get information on interceptors for this object.
- var x = %DebugInterceptorInfo(this.value_);
+ var x = %GetInterceptorInfo(this.value_);
return (x & 2) != 0;
};
ObjectMirror.prototype.hasIndexedInterceptor = function() {
// Get information on interceptors for this object.
- var x = %DebugInterceptorInfo(this.value_);
+ var x = %GetInterceptorInfo(this.value_);
return (x & 1) != 0;
};
@@ -631,13 +631,13 @@
// Find all the named properties.
if (kind & PropertyKind.Named) {
// Get the local property names.
- propertyNames = %DebugLocalPropertyNames(this.value_);
+ propertyNames = %GetLocalPropertyNames(this.value_);
total += propertyNames.length;
// Get names for named interceptor properties if any.
if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) {
var namedInterceptorNames =
- %DebugNamedInterceptorPropertyNames(this.value_);
+ %GetNamedInterceptorPropertyNames(this.value_);
if (namedInterceptorNames) {
propertyNames = propertyNames.concat(namedInterceptorNames);
total += namedInterceptorNames.length;
@@ -648,13 +648,13 @@
// Find all the indexed properties.
if (kind & PropertyKind.Indexed) {
// Get the local element names.
- elementNames = %DebugLocalElementNames(this.value_);
+ elementNames = %GetLocalElementNames(this.value_);
total += elementNames.length;
// Get names for indexed interceptor properties.
if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) {
var indexedInterceptorNames =
- %DebugIndexedInterceptorElementNames(this.value_);
+ %GetIndexedInterceptorElementNames(this.value_);
if (indexedInterceptorNames) {
elementNames = elementNames.concat(indexedInterceptorNames);
total += indexedInterceptorNames.length;
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index eb743f8..10138d9 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -130,6 +130,10 @@
}
}
+ virtual int Position() {
+ return bytes_written_;
+ }
+
private:
FILE* fp_;
int bytes_written_;
@@ -151,6 +155,7 @@
}
i::Serializer::Enable();
Persistent<Context> context = v8::Context::New();
+ ASSERT(!context.IsEmpty());
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 36f65ee..7e77e81 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -587,7 +587,6 @@
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
case PROXY_TYPE: return "PROXY";
- case SMI_TYPE: return "SMI";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 8514a41..3003342 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -150,8 +150,12 @@
bool Object::IsSymbol() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
- return (type & (kIsNotStringMask | kIsSymbolMask)) ==
- (kStringTag | kSymbolTag);
+ // Because the symbol tag is non-zero and no non-string types have the
+ // symbol bit set, we can test for symbols with a single bit-test
+ // operation.
+ ASSERT(kSymbolTag != 0);
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ return (type & kIsSymbolMask) != 0;
}
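
The simplification leans on the instance-type layout introduced later in this patch: string types keep bit 7 clear, the symbol bit is 0x40, and no non-string type reaches high enough to have bit 6 set. A compact illustration with the constants inlined (a sketch, not the real header):

    #include <cstdint>

    const uint32_t kIsNotStringMask = 0x80;  // bit 7: set for non-strings
    const uint32_t kIsSymbolMask = 0x40;     // bit 6: set only for symbols

    // Since no non-string instance type has bit 6 set, symbol-ness collapses
    // from a two-mask comparison into a single AND.
    bool IsSymbolType(uint32_t instance_type) {
      return (instance_type & kIsSymbolMask) != 0;
    }

    bool IsStringType(uint32_t instance_type) {
      return (instance_type & kIsNotStringMask) == 0;
    }
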
@@ -226,7 +230,8 @@
bool StringShape::IsSymbol() {
ASSERT(valid());
- return (type_ & kIsSymbolMask) == kSymbolTag;
+ ASSERT(kSymbolTag != 0);
+ return (type_ & kIsSymbolMask) != 0;
}
@@ -336,8 +341,8 @@
return false;
InstanceType instance_type =
HeapObject::cast(this)->map()->instance_type();
- return (instance_type >= EXTERNAL_BYTE_ARRAY_TYPE &&
- instance_type <= EXTERNAL_FLOAT_ARRAY_TYPE);
+ return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
+ instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
}
@@ -952,14 +957,14 @@
// exceed the object area size of a page.
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
- int compact_offset = offset >> kObjectAlignmentBits;
+ uintptr_t compact_offset = offset >> kObjectAlignmentBits;
ASSERT(compact_offset < (1 << kForwardingOffsetBits));
Page* map_page = Page::FromAddress(map_address);
ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
- int map_page_offset =
- map_page->Offset(map_address) >> kObjectAlignmentBits;
+ uintptr_t map_page_offset =
+ map_page->Offset(map_address) >> kMapAlignmentBits;
uintptr_t encoding =
(compact_offset << kForwardingOffsetShift) |
@@ -975,8 +980,8 @@
ASSERT_MAP_PAGE_INDEX(map_page_index);
int map_page_offset = static_cast<int>(
- ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
- << kObjectAlignmentBits);
+ ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
+ kMapAlignmentBits);
return (map_space->PageAddress(map_page_index) + map_page_offset);
}
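
During compaction the map word is repurposed to pack three fields into one machine word: the object's forwarding offset plus the page offset and page index of its map. The round trip below uses made-up field widths in place of the kMap*Bits constants defined later in this patch; it sketches the encoding scheme, not the actual code:

    #include <cassert>
    #include <cstdint>

    // Illustrative widths only; the real ones derive from kPageSizeBits,
    // kMapAlignmentBits and kObjectAlignmentBits.
    const int kIndexBits = 10, kMapOffsetBits = 11;

    uintptr_t Encode(uintptr_t fwd, uintptr_t map_off, uintptr_t index) {
      return (fwd << (kIndexBits + kMapOffsetBits)) |
             (map_off << kIndexBits) | index;
    }

    uintptr_t DecodeIndex(uintptr_t w) { return w & ((1u << kIndexBits) - 1); }
    uintptr_t DecodeMapOffset(uintptr_t w) {
      return (w >> kIndexBits) & ((1u << kMapOffsetBits) - 1);
    }
    uintptr_t DecodeFwd(uintptr_t w) {
      return w >> (kIndexBits + kMapOffsetBits);
    }

    int main() {
      uintptr_t w = Encode(5, 42, 3);
      assert(DecodeFwd(w) == 5);
      assert(DecodeMapOffset(w) == 42);
      assert(DecodeIndex(w) == 3);
    }
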
@@ -1499,7 +1504,7 @@
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- // Make sure non of the elements in desc are in new space.
+ // Make sure none of the elements in desc are in new space.
ASSERT(!Heap::InNewSpace(desc->GetKey()));
ASSERT(!Heap::InNewSpace(desc->GetValue()));
diff --git a/src/objects.cc b/src/objects.cc
index 0f8dca3..118c489 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1351,6 +1351,8 @@
Object* JSObject::AddConstantFunctionProperty(String* name,
JSFunction* function,
PropertyAttributes attributes) {
+ ASSERT(!Heap::InNewSpace(function));
+
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
Object* new_descriptors =
@@ -1437,7 +1439,7 @@
// Ensure the descriptor array does not get too big.
if (map()->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction()) {
+ if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
@@ -3254,7 +3256,8 @@
return Heap::empty_descriptor_array();
}
// Allocate the array of keys.
- Object* array = Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
+ Object* array =
+ Heap::AllocateFixedArray(ToKeyIndex(number_of_descriptors));
if (array->IsFailure()) return array;
// Do not use DescriptorArray::cast on incomplete object.
FixedArray* result = FixedArray::cast(array);
@@ -6831,43 +6834,36 @@
template<typename Shape, typename Key>
-Object* HashTable<Shape, Key>::Allocate(
- int at_least_space_for) {
+Object* HashTable<Shape, Key>::Allocate(int at_least_space_for) {
int capacity = RoundUpToPowerOf2(at_least_space_for);
- if (capacity < 4) capacity = 4; // Guarantee min capacity.
+ if (capacity < 4) {
+ capacity = 4; // Guarantee min capacity.
+ } else if (capacity > HashTable::kMaxCapacity) {
+ return Failure::OutOfMemoryException();
+ }
+
Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
if (!obj->IsFailure()) {
HashTable::cast(obj)->SetNumberOfElements(0);
+ HashTable::cast(obj)->SetNumberOfDeletedElements(0);
HashTable::cast(obj)->SetCapacity(capacity);
}
return obj;
}
-
-// Find entry for key otherwise return -1.
+// Find entry for key otherwise return kNotFound.
template<typename Shape, typename Key>
int HashTable<Shape, Key>::FindEntry(Key key) {
- uint32_t nof = NumberOfElements();
- if (nof == 0) return kNotFound; // Bail out if empty.
-
uint32_t capacity = Capacity();
- uint32_t hash = Shape::Hash(key);
- uint32_t entry = GetProbe(hash, 0, capacity);
-
- Object* element = KeyAt(entry);
- uint32_t passed_elements = 0;
- if (!element->IsNull()) {
- if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
- if (++passed_elements == nof) return kNotFound;
- }
- for (uint32_t i = 1; !element->IsUndefined(); i++) {
- entry = GetProbe(hash, i, capacity);
- element = KeyAt(entry);
- if (!element->IsNull()) {
- if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
- if (++passed_elements == nof) return kNotFound;
- }
+ uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ while (true) {
+ Object* element = KeyAt(entry);
+ if (element->IsUndefined()) break; // Empty entry.
+ if (!element->IsNull() && Shape::IsMatch(key, element)) return entry;
+ entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
}
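
The rewritten loop probes at offsets 1, 2, 3, ... from the previous slot, i.e. at triangular-number distances from the start; for a power-of-two capacity this sequence visits every slot exactly once, so the loop must reach an empty (undefined) entry as long as EnsureCapacity keeps the table from filling. A standalone sketch of the probe sequence and a check of that coverage property:

    #include <cassert>
    #include <cstdint>

    uint32_t FirstProbe(uint32_t hash, uint32_t size) {
      return hash & (size - 1);
    }

    uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t size) {
      return (last + number) & (size - 1);  // step grows by 1 each probe
    }

    int main() {
      const uint32_t size = 8;  // any power of two works
      bool seen[size] = {false};
      uint32_t entry = FirstProbe(12345u, size);
      for (uint32_t i = 1; i <= size; i++) {
        seen[entry] = true;
        entry = NextProbe(entry, i, size);
      }
      for (uint32_t s = 0; s < size; s++) assert(seen[s]);  // full coverage
    }
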
@@ -6877,8 +6873,12 @@
Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
- // Make sure 50% is free
- if (nof + (nof >> 1) <= capacity) return this;
+ int nod = NumberOfDeletedElements();
+ // Return if:
+ // 50% is still free after adding n elements and
+ // at most 50% of the free elements are deleted elements.
+ if ((nof + (nof >> 1) <= capacity) &&
+ (nod <= (capacity - nof) >> 1)) return this;
Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj;
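
The new condition enforces two invariants before skipping a rehash: at least half the capacity stays free after inserting n elements, and deletion tombstones occupy at most half of that free space (otherwise probe chains keep growing in a table that looks mostly empty). The predicate in isolation, as a hedged sketch outside the template:

    // True when the table can absorb n more elements without rehashing.
    bool HasSufficientCapacity(int capacity, int elements, int deleted, int n) {
      int nof = elements + n;
      return (nof + (nof >> 1) <= capacity) &&    // >= 50% free after insert
             (deleted <= (capacity - nof) >> 1);  // tombstones <= half of free
    }
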
@@ -6905,21 +6905,23 @@
}
}
table->SetNumberOfElements(NumberOfElements());
+ table->SetNumberOfDeletedElements(0);
return table;
}
+
template<typename Shape, typename Key>
uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
uint32_t capacity = Capacity();
- uint32_t entry = GetProbe(hash, 0, capacity);
- Object* element = KeyAt(entry);
-
- for (uint32_t i = 1; !(element->IsUndefined() || element->IsNull()); i++) {
- entry = GetProbe(hash, i, capacity);
- element = KeyAt(entry);
+ uint32_t entry = FirstProbe(hash, capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ while (true) {
+ Object* element = KeyAt(entry);
+ if (element->IsUndefined() || element->IsNull()) break;
+ entry = NextProbe(entry, count++, capacity);
}
-
return entry;
}
@@ -6998,6 +7000,10 @@
template
int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
+template
+int HashTable<NumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+
+
// Collates undefined and nonexistent elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
@@ -7700,7 +7706,7 @@
}
// Update the number of elements.
- SetNumberOfElements(NumberOfElements() - removed_entries);
+ ElementsRemoved(removed_entries);
}
@@ -7962,7 +7968,10 @@
PropertyType type = DetailsAt(i).type();
ASSERT(type != FIELD);
instance_descriptor_length++;
- if (type == NORMAL && !value->IsJSFunction()) number_of_fields += 1;
+ if (type == NORMAL &&
+ (!value->IsJSFunction() || Heap::InNewSpace(value))) {
+ number_of_fields += 1;
+ }
}
}
@@ -7993,7 +8002,7 @@
PropertyDetails details = DetailsAt(i);
PropertyType type = details.type();
- if (value->IsJSFunction()) {
+ if (value->IsJSFunction() && !Heap::InNewSpace(value)) {
ConstantFunctionDescriptor d(String::cast(key),
JSFunction::cast(value),
details.attributes(),
diff --git a/src/objects.h b/src/objects.h
index 671978a..40be0df 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -204,14 +204,13 @@
// instance_type is JS_OBJECT_TYPE.
//
// The names of the string instance types are intended to systematically
-// mirror their encoding in the instance_type field of the map. The length
-// (SHORT, MEDIUM, or LONG) is always mentioned. The default encoding is
-// considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is
-// mentioned explicitly in the name. Likewise, the default representation is
-// considered sequential. It is not mentioned in the name. The other
-// representations (eg, CONS, EXTERNAL) are explicitly mentioned.
-// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
-// STRING_TYPE (if it is not a symbol).
+// mirror their encoding in the instance_type field of the map. The default
+// encoding is considered TWO_BYTE. It is not mentioned in the name. ASCII
+// encoding is mentioned explicitly in the name. Likewise, the default
+// representation is considered sequential. It is not mentioned in the
+// name. The other representations (eg, CONS, EXTERNAL) are explicitly
+// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a
+// symbol) or a STRING_TYPE (if it is not a symbol).
//
// NOTE: The following things are some that depend on the string types having
// instance_types that are less than those of all other types:
@@ -237,11 +236,11 @@
V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
\
V(MAP_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(FIXED_ARRAY_TYPE) \
V(CODE_TYPE) \
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
V(ODDBALL_TYPE) \
+ \
+ V(HEAP_NUMBER_TYPE) \
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
V(PIXEL_ARRAY_TYPE) \
@@ -257,6 +256,7 @@
V(EXTERNAL_FLOAT_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
+ V(FIXED_ARRAY_TYPE) \
V(ACCESSOR_INFO_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
@@ -383,11 +383,12 @@
const uint32_t kStringTag = 0x0;
const uint32_t kNotStringTag = 0x80;
-// If bit 7 is clear, bit 5 indicates that the string is a symbol (if set) or
-// not (if cleared).
-const uint32_t kIsSymbolMask = 0x20;
+// Bit 6 indicates that the object is a symbol (if set) or not (if cleared).
+// There are few enough types that none of the non-string types (with bit 7
+// set) ever has bit 6 set as well.
+const uint32_t kIsSymbolMask = 0x40;
const uint32_t kNotSymbolTag = 0x0;
-const uint32_t kSymbolTag = 0x20;
+const uint32_t kSymbolTag = 0x40;
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
@@ -418,6 +419,7 @@
enum InstanceType {
+ // String types.
SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
@@ -433,56 +435,66 @@
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
- MAP_TYPE = kNotStringTag,
- HEAP_NUMBER_TYPE,
- FIXED_ARRAY_TYPE,
+ // Objects allocated in their own spaces (never in new space).
+ MAP_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE
CODE_TYPE,
ODDBALL_TYPE,
JS_GLOBAL_PROPERTY_CELL_TYPE,
+
+ // "Data", objects that cannot contain non-map-word pointers to heap
+ // objects.
+ HEAP_NUMBER_TYPE,
PROXY_TYPE,
BYTE_ARRAY_TYPE,
PIXEL_ARRAY_TYPE,
- EXTERNAL_BYTE_ARRAY_TYPE,
+ EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
EXTERNAL_SHORT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
EXTERNAL_INT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- EXTERNAL_FLOAT_ARRAY_TYPE,
- FILLER_TYPE,
- SMI_TYPE,
+ EXTERNAL_FLOAT_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+ FILLER_TYPE, // LAST_DATA_TYPE
+ // Structs.
ACCESSOR_INFO_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
CALL_HANDLER_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
+ SCRIPT_TYPE,
#ifdef ENABLE_DEBUGGER_SUPPORT
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
#endif
- SCRIPT_TYPE,
- JS_VALUE_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+
+ JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
- JS_REGEXP_TYPE,
+ JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE
JS_FUNCTION_TYPE,
// Pseudo-types
- FIRST_NONSTRING_TYPE = MAP_TYPE,
FIRST_TYPE = 0x0,
- INVALID_TYPE = FIRST_TYPE - 1,
LAST_TYPE = JS_FUNCTION_TYPE,
+ INVALID_TYPE = FIRST_TYPE - 1,
+ FIRST_NONSTRING_TYPE = MAP_TYPE,
+ // Boundaries for testing for an external array.
+ FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
+ LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE,
+ // Boundary for promotion to old data space/old pointer space.
+ LAST_DATA_TYPE = FILLER_TYPE,
// Boundaries for testing the type is a JavaScript "object". Note that
// function objects are not counted as objects, even though they are
// implemented as such; only values whose typeof is "object" are included.
@@ -892,15 +904,25 @@
static const int kOverflowBit = 1; // overflow bit
static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
- // Forwarding pointers and map pointer encoding
- // 31 21 20 10 9 0
+ // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
+ // used.
// +-----------------+------------------+-----------------+
// |forwarding offset|page offset of map|page index of map|
// +-----------------+------------------+-----------------+
- // 11 bits 11 bits 10 bits
- static const int kMapPageIndexBits = 10;
- static const int kMapPageOffsetBits = 11;
- static const int kForwardingOffsetBits = 11;
+ // ^ ^ ^
+ // | | |
+ // | | kMapPageIndexBits
+ // | kMapPageOffsetBits
+ // kForwardingOffsetBits
+ static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
+ static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
+#ifdef V8_HOST_ARCH_64_BIT
+ static const int kMapPageIndexBits = 16;
+#else
+ // Use all the 32-bits to encode on a 32-bit platform.
+ static const int kMapPageIndexBits =
+ 32 - (kMapPageOffsetBits + kForwardingOffsetBits);
+#endif
static const int kMapPageIndexShift = 0;
static const int kMapPageOffsetShift =
@@ -908,16 +930,12 @@
static const int kForwardingOffsetShift =
kMapPageOffsetShift + kMapPageOffsetBits;
- // 0x000003FF
- static const uint32_t kMapPageIndexMask =
+ // Bit masks covering the different parts of the encoding.
+ static const uintptr_t kMapPageIndexMask =
(1 << kMapPageOffsetShift) - 1;
-
- // 0x001FFC00
- static const uint32_t kMapPageOffsetMask =
+ static const uintptr_t kMapPageOffsetMask =
((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
-
- // 0xFFE00000
- static const uint32_t kForwardingOffsetMask =
+ static const uintptr_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
private:
@@ -1491,6 +1509,10 @@
#endif
Object* SlowReverseLookup(Object* value);
+ // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
+ // Also maximal value of JSArray's length property.
+ static const uint32_t kMaxElementCount = 0xffffffffu;
+
static const uint32_t kMaxGap = 1024;
static const int kMaxFastElementsLength = 5000;
static const int kInitialMaxFastElementArray = 100000;
@@ -1617,8 +1639,14 @@
// Casting.
static inline FixedArray* cast(Object* obj);
- // Align data at kPointerSize, even if Array.kHeaderSize isn't aligned.
- static const int kHeaderSize = POINTER_SIZE_ALIGN(Array::kHeaderSize);
+ static const int kHeaderSize = Array::kAlignedSize;
+
+ // Maximal allowed size, in bytes, of a single FixedArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 512 * MB;
+ // Maximally allowed length of a FixedArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
int FixedArraySize() { return SizeFor(length()); }
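
The length cap follows directly from the size cap: (kMaxSize - kHeaderSize) / kPointerSize. Worked out under assumed 32-bit constants (4-byte pointers, 8-byte header), which may differ from the actual build:

    #include <cstdio>

    const int MB = 1024 * 1024;
    const int kPointerSize = 4;  // assumed: 32-bit build
    const int kHeaderSize = 8;   // assumed header size
    const int kMaxSize = 512 * MB;
    const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;

    int main() {
      std::printf("FixedArray kMaxLength = %d\n", kMaxLength);  // 134217726
    }
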
@@ -1662,6 +1690,7 @@
public:
// Is this the singleton empty_descriptor_array?
inline bool IsEmpty();
+
// Returns the number of descriptors in the array.
int number_of_descriptors() {
return IsEmpty() ? 0 : length() - kFirstIndex;
@@ -1801,12 +1830,14 @@
static int ToKeyIndex(int descriptor_number) {
return descriptor_number+kFirstIndex;
}
+
+ static int ToDetailsIndex(int descriptor_number) {
+ return (descriptor_number << 1) + 1;
+ }
+
static int ToValueIndex(int descriptor_number) {
return descriptor_number << 1;
}
- static int ToDetailsIndex(int descriptor_number) {
- return( descriptor_number << 1) + 1;
- }
bool is_null_descriptor(int descriptor_number) {
return PropertyDetails(GetDetails(descriptor_number)).type() ==
@@ -1866,6 +1897,11 @@
return Smi::cast(get(kNumberOfElementsIndex))->value();
}
+ // Returns the number of deleted elements in the hash table.
+ int NumberOfDeletedElements() {
+ return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
+ }
+
// Returns the capacity of the hash table.
int Capacity() {
return Smi::cast(get(kCapacityIndex))->value();
@@ -1877,8 +1913,14 @@
// ElementRemoved should be called whenever an element is removed from
// a hash table.
- void ElementRemoved() { SetNumberOfElements(NumberOfElements() - 1); }
- void ElementsRemoved(int n) { SetNumberOfElements(NumberOfElements() - n); }
+ void ElementRemoved() {
+ SetNumberOfElements(NumberOfElements() - 1);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
+ }
+ void ElementsRemoved(int n) {
+ SetNumberOfElements(NumberOfElements() - n);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
+ }
// Returns a new HashTable object. Might return Failure.
static Object* Allocate(int at_least_space_for);
@@ -1905,17 +1947,24 @@
}
static const int kNumberOfElementsIndex = 0;
- static const int kCapacityIndex = 1;
- static const int kPrefixStartIndex = 2;
- static const int kElementsStartIndex =
+ static const int kNumberOfDeletedElementsIndex = 1;
+ static const int kCapacityIndex = 2;
+ static const int kPrefixStartIndex = 3;
+ static const int kElementsStartIndex =
kPrefixStartIndex + Shape::kPrefixSize;
- static const int kEntrySize = Shape::kEntrySize;
- static const int kElementsStartOffset =
+ static const int kEntrySize = Shape::kEntrySize;
+ static const int kElementsStartOffset =
kHeaderSize + kElementsStartIndex * kPointerSize;
// Constant used for denoting an absent entry.
static const int kNotFound = -1;
+ // Maximal capacity of HashTable. Based on maximal length of underlying
+ // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
+ // cannot overflow.
+ static const int kMaxCapacity =
+ (FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
+
// Find entry for key otherwise return -1.
int FindEntry(Key key);
@@ -1935,12 +1984,18 @@
fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
}
+ // Update the number of deleted elements in the hash table.
+ void SetNumberOfDeletedElements(int nod) {
+ fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+ }
+
// Sets the capacity of the hash table.
void SetCapacity(int capacity) {
// To scale a computed hash code to fit within the hash table, we
// use bit-wise AND with a mask, so the capacity must be positive
// and non-zero.
ASSERT(capacity > 0);
+ ASSERT(capacity <= kMaxCapacity);
fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
}
@@ -1951,6 +2006,14 @@
return (hash + GetProbeOffset(number)) & (size - 1);
}
+ static uint32_t FirstProbe(uint32_t hash, uint32_t size) {
+ return hash & (size - 1);
+ }
+
+ static uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t size) {
+ return (last + number) & (size - 1);
+ }
+
// Ensure enough space for n additional elements.
Object* EnsureCapacity(int n, Key key);
};
@@ -2280,6 +2343,11 @@
static const int kHeaderSize = Array::kHeaderSize;
static const int kAlignedSize = Array::kAlignedSize;
+ // Maximal memory consumption for a single ByteArray.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single ByteArray.
+ static const int kMaxLength = kMaxSize - kHeaderSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
};
@@ -2821,6 +2889,14 @@
return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
}
+ inline void set_is_extensible() {
+ set_bit_field2(bit_field2() | (1 << kIsExtensible));
+ }
+
+ inline bool is_extensible() {
+ return ((1 << kIsExtensible) & bit_field2()) != 0;
+ }
+
// Tells whether the instance needs security checks when accessing its
// properties.
inline void set_is_access_check_needed(bool access_check_needed);
@@ -2838,7 +2914,6 @@
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, FixedArray)
- // Returns a copy of the map.
Object* CopyDropDescriptors();
// Returns a copy of the map, with all transitions dropped from the
@@ -2906,7 +2981,8 @@
static const int kInstanceDescriptorsOffset =
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
- static const int kSize = kCodeCacheOffset + kPointerSize;
+ static const int kPadStart = kCodeCacheOffset + kPointerSize;
+ static const int kSize = MAP_SIZE_ALIGN(kPadStart);
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -2938,6 +3014,7 @@
// Bit positions for bit field 2
static const int kNeedsLoading = 0;
+ static const int kIsExtensible = 1;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
@@ -3566,6 +3643,14 @@
static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
+
+ // Offsets directly into the data fixed array.
+ static const int kDataTagOffset =
+ FixedArray::kHeaderSize + kTagIndex * kPointerSize;
+ static const int kDataAsciiCodeOffset =
+ FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
+ static const int kIrregexpCaptureCountOffset =
+ FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
};
@@ -3989,6 +4074,12 @@
static const int kHeaderSize = String::kSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+ // Maximal memory usage for a single sequential ASCII string.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single sequential ASCII string.
+ // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
+ static const int kMaxLength = (kMaxSize - kHeaderSize);
+
// Support for StringInputBuffer.
inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset,
@@ -4035,6 +4126,12 @@
static const int kHeaderSize = String::kSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+ // Maximal memory usage for a single sequential two-byte string.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single sequential two-byte string.
+ // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
+
// Support for StringInputBuffer.
inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
diff --git a/src/parser.cc b/src/parser.cc
index c37078c..3ae8577 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -371,7 +371,7 @@
void AddAtom(RegExpTree* tree);
void AddAssertion(RegExpTree* tree);
void NewAlternative(); // '|'
- void AddQuantifierToAtom(int min, int max, bool is_greedy);
+ void AddQuantifierToAtom(int min, int max, RegExpQuantifier::Type type);
RegExpTree* ToRegExp();
private:
void FlushCharacters();
@@ -503,7 +503,9 @@
}
-void RegExpBuilder::AddQuantifierToAtom(int min, int max, bool is_greedy) {
+void RegExpBuilder::AddQuantifierToAtom(int min,
+ int max,
+ RegExpQuantifier::Type type) {
if (pending_empty_) {
pending_empty_ = false;
return;
@@ -543,7 +545,7 @@
UNREACHABLE();
return;
}
- terms_.Add(new RegExpQuantifier(min, max, is_greedy, atom));
+ terms_.Add(new RegExpQuantifier(min, max, type, atom));
LAST(ADD_TERM);
}
@@ -1866,8 +1868,10 @@
Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
const int literals = fun->NumberOfLiterals();
Handle<Code> code = Handle<Code>(fun->shared()->code());
+ Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<JSFunction> boilerplate =
Factory::NewFunctionBoilerplate(name, literals, code);
+ boilerplate->shared()->set_construct_stub(*construct_stub);
// Copy the function data to the boilerplate. Used by
// builtins.cc:HandleApiCall to perform argument type checks and to
@@ -2657,6 +2661,9 @@
Expression* cond = NULL;
if (peek() != Token::SEMICOLON) {
cond = ParseExpression(true, CHECK_OK);
+ if (cond && cond->AsCompareOperation()) {
+ cond->AsCompareOperation()->set_is_for_loop_condition();
+ }
}
Expect(Token::SEMICOLON, CHECK_OK);
@@ -3329,7 +3336,7 @@
ArrayLiteral* array_literal = expression->AsArrayLiteral();
ASSERT(array_literal != NULL && array_literal->is_simple());
result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
- result->set(kElementsSlot, *array_literal->literals());
+ result->set(kElementsSlot, *array_literal->constant_elements());
}
return result;
}
@@ -3593,7 +3600,7 @@
top_scope_->NewUnresolved(function_name, inside_with());
fproxy->BindTo(fvar);
body.Add(new ExpressionStatement(
- new Assignment(Token::INIT_VAR, fproxy,
+ new Assignment(Token::INIT_CONST, fproxy,
NEW(ThisFunction()),
RelocInfo::kNoPosition)));
}
@@ -4275,12 +4282,16 @@
default:
continue;
}
- bool is_greedy = true;
+ RegExpQuantifier::Type type = RegExpQuantifier::GREEDY;
if (current() == '?') {
- is_greedy = false;
+ type = RegExpQuantifier::NON_GREEDY;
+ Advance();
+ } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
+ // FLAG_regexp_possessive_quantifier is a debug-only flag.
+ type = RegExpQuantifier::POSSESSIVE;
Advance();
}
- builder->AddQuantifierToAtom(min, max, is_greedy);
+ builder->AddQuantifierToAtom(min, max, type);
}
}
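
Widening the boolean into a three-valued type makes room for possessive quantifiers (e.g. a*+), which never give back characters on backtracking; here they are reachable only behind the debug-only FLAG_regexp_possessive_quantifier. A hedged sketch of the classification step, with stand-in names:

    enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };

    // One trailing character after *, + or {m,n} refines the quantifier:
    // '?' makes it lazy; '+' (when the debug flag allows) makes it possessive.
    QuantifierType ClassifyQuantifier(char next, bool possessive_allowed) {
      if (next == '?') return NON_GREEDY;
      if (possessive_allowed && next == '+') return POSSESSIVE;
      return GREEDY;
    }
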
@@ -4702,6 +4713,11 @@
}
+bool ScriptDataImpl::HasError() {
+ return has_error();
+}
+
+
ScriptDataImpl* PreParse(Handle<String> source,
unibrow::CharacterStream* stream,
v8::Extension* extension) {
diff --git a/src/parser.h b/src/parser.h
index 7328e81..a67284c 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -91,6 +91,7 @@
virtual ~ScriptDataImpl();
virtual int Length();
virtual unsigned* Data();
+ virtual bool HasError();
FunctionEntry GetFunctionEnd(int start);
bool SanityCheck();
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 87da026..9ef7270 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -594,11 +594,11 @@
ast_printer_->inc_indent();
}
- explicit IndentedScope(const char* txt, SmiAnalysis* type = NULL) {
+ explicit IndentedScope(const char* txt, StaticType* type = NULL) {
ast_printer_->PrintIndented(txt);
if ((type != NULL) && (type->IsKnown())) {
ast_printer_->Print(" (type = ");
- ast_printer_->Print(SmiAnalysis::Type2String(type));
+ ast_printer_->Print(StaticType::Type2String(type));
ast_printer_->Print(")");
}
ast_printer_->Print("\n");
@@ -657,7 +657,7 @@
void AstPrinter::PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- SmiAnalysis* type) {
+ StaticType* type) {
if (var == NULL) {
PrintLiteralIndented(info, value, true);
} else {
@@ -665,7 +665,7 @@
if (type->IsKnown()) {
OS::SNPrintF(buf, "%s (mode = %s, type = %s)", info,
Variable::Mode2String(var->mode()),
- SmiAnalysis::Type2String(type));
+ StaticType::Type2String(type));
} else {
OS::SNPrintF(buf, "%s (mode = %s)", info,
Variable::Mode2String(var->mode()));
@@ -1072,7 +1072,7 @@
OS::SNPrintF(buf, "%s %s (type = %s)",
(node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()),
- SmiAnalysis::Type2String(node->type()));
+ StaticType::Type2String(node->type()));
} else {
OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index f885cb3..dfff49a 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -102,7 +102,7 @@
void PrintLiteralWithModeIndented(const char* info,
Variable* var,
Handle<Object> value,
- SmiAnalysis* type);
+ StaticType* type);
void PrintLabelsIndented(const char* info, ZoneStringList* labels);
void inc_indent() { indent_++; }
diff --git a/src/regexp-delay.js b/src/regexp-delay.js
index 14c3644..7bec455 100644
--- a/src/regexp-delay.js
+++ b/src/regexp-delay.js
@@ -136,13 +136,7 @@
function DoRegExpExec(regexp, string, index) {
- return %RegExpExec(regexp, string, index, lastMatchInfo);
-}
-
-
-function DoRegExpExecGlobal(regexp, string) {
- // Returns an array of arrays of substring indices.
- return %RegExpExecGlobal(regexp, string, lastMatchInfo);
+ return %_RegExpExec(regexp, string, index, lastMatchInfo);
}
@@ -170,7 +164,7 @@
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
+ var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
@@ -227,7 +221,7 @@
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
+ var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 0aad337..c5c2919 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -307,18 +307,11 @@
bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
bool supported = assembler_->CheckSpecialCharacterClass(type,
- cp_offset,
- check_offset,
on_no_match);
- PrintF(" CheckSpecialCharacterClass(type='%c', offset=%d, "
- "check_offset=%s, label[%08x]): %s;\n",
+ PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
type,
- cp_offset,
- check_offset ? "true" : "false",
on_no_match,
supported ? "true" : "false");
return supported;
diff --git a/src/regexp-macro-assembler-tracer.h b/src/regexp-macro-assembler-tracer.h
index 28ca5f3..9608f9e 100644
--- a/src/regexp-macro-assembler-tracer.h
+++ b/src/regexp-macro-assembler-tracer.h
@@ -69,8 +69,6 @@
uc16 and_with,
Label* on_not_equal);
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 9ae19d7..3685fcd 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -143,17 +143,6 @@
input_end,
offsets_vector,
previous_index == 0);
-
- if (res == SUCCESS) {
- // Capture values are relative to start_offset only.
- // Convert them to be relative to start of string.
- for (int i = 0; i < offsets_vector_length; i++) {
- if (offsets_vector[i] >= 0) {
- offsets_vector[i] += previous_index;
- }
- }
- }
-
return res;
}
@@ -167,7 +156,7 @@
int* output,
bool at_start) {
typedef int (*matcher)(String*, int, const byte*,
- const byte*, int*, int, Address);
+ const byte*, int*, int, Address, int);
matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
int at_start_val = at_start ? 1 : 0;
@@ -176,6 +165,7 @@
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
+ int direct_call = 0;
int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
input,
start_offset,
@@ -183,7 +173,8 @@
input_end,
output,
at_start_val,
- stack_base);
+ stack_base,
+ direct_call);
ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);
@@ -198,6 +189,30 @@
static unibrow::Mapping<unibrow::Ecma262Canonicalize> canonicalize;
+
+byte NativeRegExpMacroAssembler::word_character_map[] = {
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // '0' - '7'
+ 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // '8' - '9'
+
+ 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'A' - 'G'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'H' - 'O'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'P' - 'W'
+ 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0xffu, // 'X' - 'Z', '_'
+
+ 0x00u, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'a' - 'g'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
+ 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
+ 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
+};
+
+
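With this table, generated regexp code can classify \w membership with a single indexed byte load instead of four range comparisons. A sketch of building and consulting such a table (my own helpers, not the generated code):

    #include <cassert>

    unsigned char WordCharEntry(int c) {
      bool word = (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
                  (c >= 'a' && c <= 'z') || c == '_';
      return word ? 0xff : 0x00;  // same 0xff/0x00 shape as word_character_map
    }

    bool IsAsciiWordChar(unsigned int c, const unsigned char* table) {
      return c < 128 && table[c] != 0;  // one load replaces four range checks
    }

    int main() {
      unsigned char table[128];
      for (int i = 0; i < 128; i++) table[i] = WordCharEntry(i);
      assert(IsAsciiWordChar('_', table));
      assert(!IsAsciiWordChar('-', table));
    }
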
int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
Address byte_offset1,
Address byte_offset2,
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index aa01096..2e619bd 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -123,8 +123,6 @@
// not have custom support.
// May clobber the current loaded character.
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
return false;
}
@@ -206,6 +204,15 @@
static const byte* StringCharacterPosition(String* subject, int start_index);
+ // Byte map of ASCII characters with a 0xff if the character is a word
+ // character (digit, letter or underscore) and 0x00 otherwise.
+ // Used by generated RegExp code.
+ static byte word_character_map[128];
+
+ static Address word_character_map_address() {
+ return &word_character_map[0];
+ }
+
static Result Execute(Code* code,
String* input,
int start_offset,
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
index fbaa6fb..b4fa2e9 100644
--- a/src/regexp-stack.h
+++ b/src/regexp-stack.h
@@ -98,12 +98,24 @@
void Free();
};
+ // Address of allocated memory.
+ static Address memory_address() {
+ return reinterpret_cast<Address>(&thread_local_.memory_);
+ }
+
+ // Address of size of allocated memory.
+ static Address memory_size_address() {
+ return reinterpret_cast<Address>(&thread_local_.memory_size_);
+ }
+
// Resets the buffer if it has grown beyond the default/minimum size.
// After this, the buffer is either the default size, or it is empty, so
// you have to call EnsureCapacity before using it again.
static void Reset();
static ThreadLocal thread_local_;
+
+ friend class ExternalReference;
};
}} // namespace v8::internal
diff --git a/src/rewriter.cc b/src/rewriter.cc
index de1b95b..b05cfae 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -367,7 +367,7 @@
if (proxy != NULL) {
Variable* var = proxy->AsVariable();
if (var != NULL) {
- SmiAnalysis* var_type = var->type();
+ StaticType* var_type = var->type();
if (var_type->IsUnknown()) {
var_type->CopyFrom(node->type());
} else if (var_type->IsLikelySmi()) {
diff --git a/src/runtime.cc b/src/runtime.cc
index b07361a..b6da528 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -398,6 +398,82 @@
}
+static Object* Runtime_CreateObjectLiteral(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+ CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> boilerplate(literals->get(literals_index));
+ if (*boilerplate == Heap::undefined_value()) {
+ boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+ if (boilerplate.is_null()) return Failure::Exception();
+ // Update the function's literals and return the boilerplate.
+ literals->set(literals_index, *boilerplate);
+ }
+ return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+}
+
+
+static Object* Runtime_CreateObjectLiteralShallow(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+ CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
+
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> boilerplate(literals->get(literals_index));
+ if (*boilerplate == Heap::undefined_value()) {
+ boilerplate = CreateObjectLiteralBoilerplate(literals, constant_properties);
+ if (boilerplate.is_null()) return Failure::Exception();
+ // Update the function's literals and return the boilerplate.
+ literals->set(literals_index, *boilerplate);
+ }
+ return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+}
+
+
+static Object* Runtime_CreateArrayLiteral(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+ CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> boilerplate(literals->get(literals_index));
+ if (*boilerplate == Heap::undefined_value()) {
+ boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+ if (boilerplate.is_null()) return Failure::Exception();
+ // Update the function's literals and return the boilerplate.
+ literals->set(literals_index, *boilerplate);
+ }
+ return DeepCopyBoilerplate(JSObject::cast(*boilerplate));
+}
+
+
+static Object* Runtime_CreateArrayLiteralShallow(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 3);
+ CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+ CONVERT_SMI_CHECKED(literals_index, args[1]);
+ CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+ // Check if boilerplate exists. If not, create it first.
+ Handle<Object> boilerplate(literals->get(literals_index));
+ if (*boilerplate == Heap::undefined_value()) {
+ boilerplate = CreateArrayLiteralBoilerplate(literals, elements);
+ if (boilerplate.is_null()) return Failure::Exception();
+ // Update the functions literal and return the boilerplate.
+ literals->set(literals_index, *boilerplate);
+ }
+ return Heap::CopyJSObject(JSObject::cast(*boilerplate));
+}
+
+
static Object* Runtime_CreateCatchExtensionObject(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, key, args[0]);
@@ -483,6 +559,82 @@
}
+// Recursively traverses hidden prototypes if property is not found
+static void GetOwnPropertyImplementation(JSObject* obj,
+ String* name,
+ LookupResult* result) {
+ obj->LocalLookupRealNamedProperty(name, result);
+
+ if (!result->IsProperty()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype())
+ GetOwnPropertyImplementation(JSObject::cast(proto),
+ name, result);
+ }
+}
+
+
+// Returns an array with the property description:
+// if args[1] is not a property on args[0]
+// returns undefined
+// if args[1] is a data property on args[0]
+// [false, value, Writable, Enumerable, Configurable]
+// if args[1] is an accessor on args[0]
+// [true, GetFunction, SetFunction, Enumerable, Configurable]
+static Object* Runtime_GetOwnProperty(Arguments args) {
+ ASSERT(args.length() == 2);
+ HandleScope scope;
+ Handle<FixedArray> elms = Factory::NewFixedArray(5);
+ Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
+ LookupResult result;
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ CONVERT_CHECKED(String, name, args[1]);
+
+ // Use recursive implementation to also traverse hidden prototypes
+ GetOwnPropertyImplementation(obj, name, &result);
+
+ if (!result.IsProperty())
+ return Heap::undefined_value();
+
+ if (result.type() == CALLBACKS) {
+ Object* structure = result.GetCallbackObject();
+ if (structure->IsProxy()) {
+ // Property that is internally implemented as a callback.
+ Object* value = obj->GetPropertyWithCallback(
+ obj, structure, name, result.holder());
+ elms->set(0, Heap::false_value());
+ elms->set(1, value);
+ elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ } else if (structure->IsFixedArray()) {
+ // __defineGetter__/__defineSetter__ callback.
+ elms->set(0, Heap::true_value());
+ elms->set(1, FixedArray::cast(structure)->get(0));
+ elms->set(2, FixedArray::cast(structure)->get(1));
+ } else {
+ // TODO(ricow): Handle API callbacks.
+ return Heap::undefined_value();
+ }
+ } else {
+ elms->set(0, Heap::false_value());
+ elms->set(1, result.GetLazyValue());
+ elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ }
+
+ elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
+ elms->set(4, Heap::ToBoolean(!result.IsReadOnly()));
+ return *desc;
+}
+
+
+static Object* Runtime_IsExtensible(Arguments args) {
+ ASSERT(args.length() == 1);
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ return obj->map()->is_extensible() ? Heap::true_value()
+ : Heap::false_value();
+}
+
+
static Object* Runtime_RegExpCompile(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
@@ -644,7 +796,7 @@
// Copy the function and update its context. Use it as value.
Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value);
Handle<JSFunction> function =
- Factory::NewFunctionFromBoilerplate(boilerplate, context);
+ Factory::NewFunctionFromBoilerplate(boilerplate, context, TENURED);
value = function;
}
@@ -1082,6 +1234,7 @@
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
+ Counters::regexp_entry_runtime.Increment();
Handle<Object> result = RegExpImpl::Exec(regexp,
subject,
index,
@@ -1308,6 +1461,17 @@
}
+static Object* CharFromCode(Object* char_code) {
+ uint32_t code;
+ if (Array::IndexFromObject(char_code, &code)) {
+ if (code <= 0xffff) {
+ return Heap::LookupSingleCharacterStringFromCode(code);
+ }
+ }
+ return Heap::empty_string();
+}
+
+
static Object* Runtime_StringCharCodeAt(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -1318,16 +1482,24 @@
}
+static Object* Runtime_StringCharAt(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_CHECKED(String, subject, args[0]);
+ Object* index = args[1];
+ Object* code = CharCodeAt(subject, index);
+ if (code == Heap::nan_value()) {
+ return Heap::undefined_value();
+ }
+ return CharFromCode(code);
+}
+
+
static Object* Runtime_CharFromCode(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- uint32_t code;
- if (Array::IndexFromObject(args[0], &code)) {
- if (code <= 0xffff) {
- return Heap::LookupSingleCharacterStringFromCode(code);
- }
- }
- return Heap::empty_string();
+ return CharFromCode(args[0]);
}
// Forward declarations.
@@ -1433,7 +1605,7 @@
void IncrementCharacterCount(int by) {
- if (character_count_ > Smi::kMaxValue - by) {
+ if (character_count_ > String::kMaxLength - by) {
V8::FatalProcessOutOfMemory("String.replace result too large.");
}
character_count_ += by;
@@ -2397,6 +2569,7 @@
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
+ Counters::sub_string_runtime.Increment();
return value->SubString(start, end);
}
@@ -2648,7 +2821,6 @@
}
-
// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
static Object* Runtime_KeyedGetProperty(Arguments args) {
NoHandleAllocation ha;
@@ -2700,6 +2872,13 @@
// If value is the hole do the general lookup.
}
}
+ } else if (args[0]->IsString() && args[1]->IsSmi()) {
+ // Fast case for string indexing using [] with a smi index.
+ HandleScope scope;
+ Handle<String> str = args.at<String>(0);
+ int index = Smi::cast(args[1])->value();
+ Handle<Object> result = GetCharAt(str, index);
+ return *result;
}
// Fall back to GetObjectProperty.
@@ -3043,6 +3222,156 @@
}
+// Find the length of the prototype chain that is to be handled as one. If a
+// prototype object is hidden, it is to be viewed as part of the object it
+// is the prototype for.
+static int LocalPrototypeChainLength(JSObject* obj) {
+ int count = 1;
+ Object* proto = obj->GetPrototype();
+ while (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype()) {
+ count++;
+ proto = JSObject::cast(proto)->GetPrototype();
+ }
+ return count;
+}
+
+
+// Return the names of the local named properties.
+// args[0]: object
+static Object* Runtime_GetLocalPropertyNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Heap::undefined_value();
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ // Skip the global proxy as it has no properties and always delegates to the
+ // real global object.
+ if (obj->IsJSGlobalProxy()) {
+ obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
+ }
+
+ // Find the number of objects making up this object.
+ int length = LocalPrototypeChainLength(*obj);
+
+ // Find the number of local properties for each of the objects.
+ int* local_property_count = NewArray<int>(length);
+ int total_property_count = 0;
+ Handle<JSObject> jsproto = obj;
+ for (int i = 0; i < length; i++) {
+ int n;
+ n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
+ local_property_count[i] = n;
+ total_property_count += n;
+ if (i < length - 1) {
+ jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+ }
+ }
+
+ // Allocate an array with storage for all the property names.
+ Handle<FixedArray> names = Factory::NewFixedArray(total_property_count);
+
+ // Get the property names.
+ jsproto = obj;
+ int proto_with_hidden_properties = 0;
+ for (int i = 0; i < length; i++) {
+ jsproto->GetLocalPropertyNames(*names,
+ i == 0 ? 0 : local_property_count[i - 1]);
+ if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
+ proto_with_hidden_properties++;
+ }
+ if (i < length - 1) {
+ jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+ }
+ }
+
+ // Filter out the name of the hidden properties object.
+ if (proto_with_hidden_properties > 0) {
+ Handle<FixedArray> old_names = names;
+ names = Factory::NewFixedArray(
+ names->length() - proto_with_hidden_properties);
+ int dest_pos = 0;
+ for (int i = 0; i < total_property_count; i++) {
+ Object* name = old_names->get(i);
+ if (name == Heap::hidden_symbol()) {
+ continue;
+ }
+ names->set(dest_pos++, name);
+ }
+ }
+
+ DeleteArray(local_property_count);
+ return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return the names of the local indexed properties.
+// args[0]: object
+static Object* Runtime_GetLocalElementNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Heap::undefined_value();
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
+ Handle<FixedArray> names = Factory::NewFixedArray(n);
+ obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
+ return *Factory::NewJSArrayWithElements(names);
+}
+
+
+// Return information on whether an object has a named or indexed interceptor.
+// args[0]: object
+static Object* Runtime_GetInterceptorInfo(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ if (!args[0]->IsJSObject()) {
+ return Smi::FromInt(0);
+ }
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ int result = 0;
+ if (obj->HasNamedInterceptor()) result |= 2;
+ if (obj->HasIndexedInterceptor()) result |= 1;
+
+ return Smi::FromInt(result);
+}
+
+
+// Return property names from named interceptor.
+// args[0]: object
+static Object* Runtime_GetNamedInterceptorPropertyNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ if (obj->HasNamedInterceptor()) {
+ v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
+ if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ }
+ return Heap::undefined_value();
+}
+
+
+// Return element names from indexed interceptor.
+// args[0]: object
+static Object* Runtime_GetIndexedInterceptorElementNames(Arguments args) {
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
+
+ if (obj->HasIndexedInterceptor()) {
+ v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
+ if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
+ }
+ return Heap::undefined_value();
+}
+
+
static Object* Runtime_LocalKeys(Arguments args) {
ASSERT_EQ(args.length(), 1);
CONVERT_CHECKED(JSObject, raw_object, args[0]);
@@ -3286,6 +3615,7 @@
escaped_length += 3;
}
// We don't allow strings that are longer than a maximal length.
+ ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
@@ -3832,20 +4162,19 @@
static Object* Runtime_StringBuilderConcat(Arguments args) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, array, args[0]);
- CONVERT_CHECKED(String, special, args[1]);
+ if (!args[1]->IsSmi()) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
+ int array_length = Smi::cast(args[1])->value();
+ CONVERT_CHECKED(String, special, args[2]);
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
int special_length = special->length();
- Object* smi_array_length = array->length();
- if (!smi_array_length->IsSmi()) {
- Top::context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- int array_length = Smi::cast(smi_array_length)->value();
if (!array->HasFastElements()) {
return Top::Throw(Heap::illegal_argument_symbol());
}
@@ -3863,6 +4192,7 @@
bool ascii = special->IsAsciiRepresentation();
int position = 0;
+ int increment = 0;
for (int i = 0; i < array_length; i++) {
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
@@ -3875,10 +4205,10 @@
if (pos + len > special_length) {
return Top::Throw(Heap::illegal_argument_symbol());
}
- position += len;
+ increment = len;
} else {
// Position and length encoded in two smis.
- position += (-len);
+ increment = (-len);
// Get the position and check that it is also a smi.
i++;
if (i >= array_length) {
@@ -3892,17 +4222,18 @@
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
- position += element_length;
+ increment = element_length;
if (ascii && !element->IsAsciiRepresentation()) {
ascii = false;
}
} else {
return Top::Throw(Heap::illegal_argument_symbol());
}
- if (position > String::kMaxLength) {
+ if (increment > String::kMaxLength - position) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
+ position += increment;
}
int length = position;
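
The rewritten guard tests the increment against the remaining headroom (String::kMaxLength - position) before adding, so the running total can never overflow first. The same pattern, sketched in JavaScript with an assumed kMaxLength constant:

  var kMaxLength = 0x3fffffff;  // assumed stand-in for String::kMaxLength
  function addLength(position, increment) {
    if (increment > kMaxLength - position) {
      throw new Error("out of memory");  // mirrors the OutOfMemoryException path
    }
    return position + increment;
  }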
@@ -4116,6 +4447,8 @@
CONVERT_CHECKED(String, x, args[0]);
CONVERT_CHECKED(String, y, args[1]);
+ Counters::string_compare_runtime.Increment();
+
// A few fast case tests before we flatten.
if (x == y) return Smi::FromInt(EQUAL);
if (y->length() == 0) {
@@ -4426,8 +4759,11 @@
CONVERT_ARG_CHECKED(Context, context, 0);
CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);
+ PretenureFlag pretenure = (context->global_context() == *context)
+ ? TENURED // Allocate global closures in old space.
+ : NOT_TENURED; // Allocate local closures in new space.
Handle<JSFunction> result =
- Factory::NewFunctionFromBoilerplate(boilerplate, context);
+ Factory::NewFunctionFromBoilerplate(boilerplate, context, pretenure);
return *result;
}
@@ -5143,56 +5479,36 @@
validate);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
- Factory::NewFunctionFromBoilerplate(boilerplate, context);
+ Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
return *fun;
}
-static Handle<JSFunction> GetBuiltinFunction(String* name) {
- LookupResult result;
- Top::global_context()->builtins()->LocalLookup(name, &result);
- return Handle<JSFunction>(JSFunction::cast(result.GetValue()));
-}
+static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
+ ASSERT(args.length() == 3);
+ if (!args[0]->IsJSFunction()) {
+ return MakePair(Top::ThrowIllegalOperation(), NULL);
+ }
-
-static Object* CompileDirectEval(Handle<String> source) {
- // Compute the eval context.
HandleScope scope;
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
+ Handle<Object> receiver; // Will be overwritten.
+
+ // Compute the calling context.
+ Handle<Context> context = Handle<Context>(Top::context());
+#ifdef DEBUG
+ // Make sure Top::context() agrees with the old code that traversed
+ // the stack frames to compute the context.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- Handle<Context> context(Context::cast(frame->context()));
- bool is_global = context->IsGlobalContext();
-
- // Compile source string in the current context.
- Handle<JSFunction> boilerplate = Compiler::CompileEval(
- source,
- context,
- is_global,
- Compiler::DONT_VALIDATE_JSON);
- if (boilerplate.is_null()) return Failure::Exception();
- Handle<JSFunction> fun =
- Factory::NewFunctionFromBoilerplate(boilerplate, context);
- return *fun;
-}
-
-
-static Object* Runtime_ResolvePossiblyDirectEval(Arguments args) {
- ASSERT(args.length() == 2);
-
- HandleScope scope;
-
- CONVERT_ARG_CHECKED(JSFunction, callee, 0);
-
- Handle<Object> receiver;
+ ASSERT(Context::cast(frame->context()) == *context);
+#endif
// Find where the 'eval' symbol is bound. It is unaliased only if
// it is bound in the global context.
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- Handle<Context> context(Context::cast(frame->context()));
- int index;
- PropertyAttributes attributes;
- while (!context.is_null()) {
+ int index = -1;
+ PropertyAttributes attributes = ABSENT;
+ while (true) {
receiver = context->Lookup(Factory::eval_symbol(), FOLLOW_PROTOTYPE_CHAIN,
&index, &attributes);
// Stop search when eval is found or when the global context is
@@ -5211,46 +5527,42 @@
Handle<Object> name = Factory::eval_symbol();
Handle<Object> reference_error =
Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return Top::Throw(*reference_error);
+ return MakePair(Top::Throw(*reference_error), NULL);
}
- if (context->IsGlobalContext()) {
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
- Handle<JSFunction> global_eval =
- GetBuiltinFunction(Heap::global_eval_symbol());
- if (global_eval.is_identical_to(callee)) {
- // A direct eval call.
- if (args[1]->IsString()) {
- CONVERT_ARG_CHECKED(String, source, 1);
- // A normal eval call on a string. Compile it and return the
- // compiled function bound in the local context.
- Object* compiled_source = CompileDirectEval(source);
- if (compiled_source->IsFailure()) return compiled_source;
- receiver = Handle<Object>(frame->receiver());
- callee = Handle<JSFunction>(JSFunction::cast(compiled_source));
- } else {
- // An eval call that is not called on a string. Global eval
- // deals better with this.
- receiver = Handle<Object>(Top::global_context()->global());
- }
- } else {
- // 'eval' is overwritten. Just call the function with the given arguments.
- receiver = Handle<Object>(Top::global_context()->global());
- }
- } else {
+ if (!context->IsGlobalContext()) {
// 'eval' is not bound in the global context. Just call the function
// with the given arguments. This is not necessarily the global eval.
if (receiver->IsContext()) {
context = Handle<Context>::cast(receiver);
receiver = Handle<Object>(context->get(index));
+ } else if (receiver->IsJSContextExtensionObject()) {
+ receiver = Handle<JSObject>(Top::context()->global()->global_receiver());
}
+ return MakePair(*callee, *receiver);
}
- Handle<FixedArray> call = Factory::NewFixedArray(2);
- call->set(0, *callee);
- call->set(1, *receiver);
- return *call;
+ // 'eval' is bound in the global context, but it may have been overwritten.
+ // Compare it to the builtin 'GlobalEval' function to make sure.
+ if (*callee != Top::global_context()->global_eval_fun() ||
+ !args[1]->IsString()) {
+ return MakePair(*callee, Top::context()->global()->global_receiver());
+ }
+
+ // Deal with a normal eval call with a string argument. Compile it
+ // and return the compiled function bound in the local context.
+ Handle<String> source = args.at<String>(1);
+ Handle<JSFunction> boilerplate = Compiler::CompileEval(
+ source,
+ Handle<Context>(Top::context()),
+ Top::context()->IsGlobalContext(),
+ Compiler::DONT_VALIDATE_JSON);
+ if (boilerplate.is_null()) return MakePair(Failure::Exception(), NULL);
+ callee = Factory::NewFunctionFromBoilerplate(
+ boilerplate,
+ Handle<Context>(Top::context()),
+ NOT_TENURED);
+ return MakePair(*callee, args[2]);
}
@@ -5307,11 +5619,11 @@
uint32_t index_limit,
bool fast_elements) :
storage_(storage), index_limit_(index_limit),
- fast_elements_(fast_elements), index_offset_(0) { }
+ index_offset_(0), fast_elements_(fast_elements) { }
void visit(uint32_t i, Handle<Object> elm) {
- uint32_t index = i + index_offset_;
- if (index >= index_limit_) return;
+ if (i >= index_limit_ - index_offset_) return;
+ uint32_t index = index_offset_ + i;
if (fast_elements_) {
ASSERT(index < static_cast<uint32_t>(storage_->length()));
@@ -5327,14 +5639,23 @@
}
void increase_index_offset(uint32_t delta) {
- index_offset_ += delta;
+ if (index_limit_ - index_offset_ < delta) {
+ index_offset_ = index_limit_;
+ } else {
+ index_offset_ += delta;
+ }
}
+ Handle<FixedArray> storage() { return storage_; }
+
private:
Handle<FixedArray> storage_;
+ // Limit on the accepted indices. Elements with indices larger than the
+ // limit are ignored by the visitor.
uint32_t index_limit_;
- bool fast_elements_;
+ // Index after last seen index. Always less than or equal to index_limit_.
uint32_t index_offset_;
+ bool fast_elements_;
};
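
Both visit() and increase_index_offset() are written so that index_offset_ saturates at index_limit_ instead of wrapping around 32 bits. A JavaScript sketch of the same saturating state machine:

  function makeVisitorState(indexLimit) {
    return {
      indexOffset: 0,
      accepts: function(i) {
        // Equivalent to i + indexOffset < indexLimit, but cannot overflow.
        return i < indexLimit - this.indexOffset;
      },
      increaseIndexOffset: function(delta) {
        this.indexOffset = (indexLimit - this.indexOffset < delta)
            ? indexLimit
            : this.indexOffset + delta;
      }
    };
  }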
@@ -5506,6 +5827,11 @@
*
* If a ArrayConcatVisitor object is given, the visitor is called with
* parameters, element's index + visitor_index_offset and the element.
+ *
+ * The returned number of elements is an upper bound on the actual number
+ * of elements added. If the same element occurs in more than one object
+ * in the array's prototype chain, it will be counted more than once, but
+ * will only occur once in the result.
*/
static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
ArrayConcatVisitor* visitor) {
@@ -5528,8 +5854,14 @@
uint32_t nof_elements = 0;
for (int i = objects.length() - 1; i >= 0; i--) {
Handle<JSObject> obj = objects[i];
- nof_elements +=
+ uint32_t encountered_elements =
IterateElements(Handle<JSObject>::cast(obj), range, visitor);
+
+ if (encountered_elements > JSObject::kMaxElementCount - nof_elements) {
+ nof_elements = JSObject::kMaxElementCount;
+ } else {
+ nof_elements += encountered_elements;
+ }
}
return nof_elements;
@@ -5546,10 +5878,12 @@
* elements. If an argument is not an Array object, the function
 * visits the object as if it is a one-element array.
*
- * If the result array index overflows 32-bit integer, the rounded
+ * If the result array index overflows 32-bit unsigned integer, the rounded
* non-negative number is used as new length. For example, if one
* array length is 2^32 - 1, second array length is 1, the
* concatenated array length is 0.
+ * TODO(lrn): Change length behavior to the ECMAScript 5 specification (length
+ * is one more than the last array index that was assigned a value).
*/
static uint32_t IterateArguments(Handle<JSArray> arguments,
ArrayConcatVisitor* visitor) {
@@ -5565,16 +5899,23 @@
IterateArrayAndPrototypeElements(array, visitor);
// Total elements of array and its prototype chain can be more than
// the array length, but ArrayConcat can only concatenate at most
- // the array length number of elements.
- visited_elements += (nof_elements > len) ? len : nof_elements;
+ // the array length number of elements. We use the length as an estimate
+ // for the actual number of elements added.
+ uint32_t added_elements = (nof_elements > len) ? len : nof_elements;
+ if (JSArray::kMaxElementCount - visited_elements < added_elements) {
+ visited_elements = JSArray::kMaxElementCount;
+ } else {
+ visited_elements += added_elements;
+ }
if (visitor) visitor->increase_index_offset(len);
-
} else {
if (visitor) {
visitor->visit(0, obj);
visitor->increase_index_offset(1);
}
- visited_elements++;
+ if (visited_elements < JSArray::kMaxElementCount) {
+ visited_elements++;
+ }
}
}
return visited_elements;
@@ -5584,6 +5925,8 @@
/**
* Array::concat implementation.
* See ECMAScript 262, 15.4.4.4.
+ * TODO(lrn): Fix non-compliance for very large concatenations and update to
+ * follow the ECMAScript 5 specification.
*/
static Object* Runtime_ArrayConcat(Arguments args) {
ASSERT(args.length() == 1);
@@ -5600,12 +5943,18 @@
{ AssertNoAllocation nogc;
for (uint32_t i = 0; i < num_of_args; i++) {
Object* obj = arguments->GetElement(i);
+ uint32_t length_estimate;
if (obj->IsJSArray()) {
- result_length +=
+ length_estimate =
static_cast<uint32_t>(JSArray::cast(obj)->length()->Number());
} else {
- result_length++;
+ length_estimate = 1;
}
+ if (JSObject::kMaxElementCount - result_length < length_estimate) {
+ result_length = JSObject::kMaxElementCount;
+ break;
+ }
+ result_length += length_estimate;
}
}
@@ -5639,7 +5988,8 @@
IterateArguments(arguments, &visitor);
result->set_length(*len);
- result->set_elements(*storage);
+ // Please note the storage might have changed in the visitor.
+ result->set_elements(*visitor.storage());
return *result;
}
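
The length-estimation loop uses the same saturating-add idiom: once the running total would exceed JSObject::kMaxElementCount it is pinned at that ceiling. A hedged sketch, with kMaxElementCount as an assumed stand-in:

  var kMaxElementCount = 0xffffffff;  // assumed stand-in for JSObject::kMaxElementCount
  function estimateLength(lengths) {
    var total = 0;
    for (var i = 0; i < lengths.length; i++) {
      if (kMaxElementCount - total < lengths[i]) return kMaxElementCount;
      total += lengths[i];
    }
    return total;
  }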
@@ -5814,21 +6164,6 @@
}
-// Find the length of the prototype chain that is to to handled as one. If a
-// prototype object is hidden it is to be viewed as part of the the object it
-// is prototype for.
-static int LocalPrototypeChainLength(JSObject* obj) {
- int count = 1;
- Object* proto = obj->GetPrototype();
- while (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype()) {
- count++;
- proto = JSObject::cast(proto)->GetPrototype();
- }
- return count;
-}
-
-
static Object* DebugLookupResultValue(Object* receiver, String* name,
LookupResult* result,
bool* caught_exception) {
@@ -5998,93 +6333,6 @@
}
-// Return the names of the local named properties.
-// args[0]: object
-static Object* Runtime_DebugLocalPropertyNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- // Skip the global proxy as it has no properties and always delegates to the
- // real global object.
- if (obj->IsJSGlobalProxy()) {
- obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
- }
-
- // Find the number of objects making up this.
- int length = LocalPrototypeChainLength(*obj);
-
- // Find the number of local properties for each of the objects.
- int* local_property_count = NewArray<int>(length);
- int total_property_count = 0;
- Handle<JSObject> jsproto = obj;
- for (int i = 0; i < length; i++) {
- int n;
- n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
- local_property_count[i] = n;
- total_property_count += n;
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Allocate an array with storage for all the property names.
- Handle<FixedArray> names = Factory::NewFixedArray(total_property_count);
-
- // Get the property names.
- jsproto = obj;
- int proto_with_hidden_properties = 0;
- for (int i = 0; i < length; i++) {
- jsproto->GetLocalPropertyNames(*names,
- i == 0 ? 0 : local_property_count[i - 1]);
- if (!GetHiddenProperties(jsproto, false)->IsUndefined()) {
- proto_with_hidden_properties++;
- }
- if (i < length - 1) {
- jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
- }
- }
-
- // Filter out name of hidden propeties object.
- if (proto_with_hidden_properties > 0) {
- Handle<FixedArray> old_names = names;
- names = Factory::NewFixedArray(
- names->length() - proto_with_hidden_properties);
- int dest_pos = 0;
- for (int i = 0; i < total_property_count; i++) {
- Object* name = old_names->get(i);
- if (name == Heap::hidden_symbol()) {
- continue;
- }
- names->set(dest_pos++, name);
- }
- }
-
- DeleteArray(local_property_count);
- return *Factory::NewJSArrayWithElements(names);
-}
-
-
-// Return the names of the local indexed properties.
-// args[0]: object
-static Object* Runtime_DebugLocalElementNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Heap::undefined_value();
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
- Handle<FixedArray> names = Factory::NewFixedArray(n);
- obj->GetLocalElementKeys(*names, static_cast<PropertyAttributes>(NONE));
- return *Factory::NewJSArrayWithElements(names);
-}
-
-
// Return the property type calculated from the property details.
// args[0]: smi with property details.
static Object* Runtime_DebugPropertyTypeFromDetails(Arguments args) {
@@ -6115,54 +6363,6 @@
}
-// Return information on whether an object has a named or indexed interceptor.
-// args[0]: object
-static Object* Runtime_DebugInterceptorInfo(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- if (!args[0]->IsJSObject()) {
- return Smi::FromInt(0);
- }
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- int result = 0;
- if (obj->HasNamedInterceptor()) result |= 2;
- if (obj->HasIndexedInterceptor()) result |= 1;
-
- return Smi::FromInt(result);
-}
-
-
-// Return property names from named interceptor.
-// args[0]: object
-static Object* Runtime_DebugNamedInterceptorPropertyNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- if (obj->HasNamedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return Heap::undefined_value();
-}
-
-
-// Return element names from indexed interceptor.
-// args[0]: object
-static Object* Runtime_DebugIndexedInterceptorElementNames(Arguments args) {
- HandleScope scope;
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- if (obj->HasIndexedInterceptor()) {
- v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
- if (!result.IsEmpty()) return *v8::Utils::OpenHandle(*result);
- }
- return Heap::undefined_value();
-}
-
-
// Return property value from named interceptor.
// args[0]: object
// args[1]: property name
@@ -7808,7 +8008,8 @@
HandleScope scope;
- int initial_size = limit < 10 ? limit : 10;
+ limit = Max(limit, 0); // Ensure that limit is not negative.
+ int initial_size = Min(limit, 10);
Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
StackFrameIterator iter;
diff --git a/src/runtime.h b/src/runtime.h
index 8580233..efef7db 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -52,6 +52,11 @@
F(IsPropertyEnumerable, 2, 1) \
F(GetPropertyNames, 1, 1) \
F(GetPropertyNamesFast, 1, 1) \
+ F(GetLocalPropertyNames, 1, 1) \
+ F(GetLocalElementNames, 1, 1) \
+ F(GetInterceptorInfo, 1, 1) \
+ F(GetNamedInterceptorPropertyNames, 1, 1) \
+ F(GetIndexedInterceptorElementNames, 1, 1) \
F(GetArgumentsProperty, 1, 1) \
F(ToFastProperties, 1, 1) \
F(ToSlowProperties, 1, 1) \
@@ -61,6 +66,10 @@
\
F(IsConstructCall, 0, 1) \
\
+ F(GetOwnProperty, 2, 1) \
+ \
+ F(IsExtensible, 1, 1) \
+ \
/* Utilities */ \
F(GetCalledFunction, 0, 1) \
F(GetFunctionDelegate, 1, 1) \
@@ -103,7 +112,7 @@
F(NumberUnaryMinus, 1, 1) \
\
F(StringAdd, 2, 1) \
- F(StringBuilderConcat, 2, 1) \
+ F(StringBuilderConcat, 3, 1) \
\
/* Bit operations */ \
F(NumberOr, 2, 1) \
@@ -146,6 +155,7 @@
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
+ F(StringCharAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
@@ -202,7 +212,7 @@
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 2, 1) \
+ F(ResolvePossiblyDirectEval, 3, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
@@ -223,6 +233,10 @@
F(CreateObjectLiteralBoilerplate, 3, 1) \
F(CloneLiteralBoilerplate, 1, 1) \
F(CloneShallowLiteralBoilerplate, 1, 1) \
+ F(CreateObjectLiteral, 3, 1) \
+ F(CreateObjectLiteralShallow, 3, 1) \
+ F(CreateArrayLiteral, 3, 1) \
+ F(CreateArrayLiteralShallow, 3, 1) \
\
/* Catch context extension objects */ \
F(CreateCatchExtensionObject, 2, 1) \
@@ -278,14 +292,9 @@
F(Break, 0, 1) \
F(DebugGetPropertyDetails, 2, 1) \
F(DebugGetProperty, 2, 1) \
- F(DebugLocalPropertyNames, 1, 1) \
- F(DebugLocalElementNames, 1, 1) \
F(DebugPropertyTypeFromDetails, 1, 1) \
F(DebugPropertyAttributesFromDetails, 1, 1) \
F(DebugPropertyIndexFromDetails, 1, 1) \
- F(DebugInterceptorInfo, 1, 1) \
- F(DebugNamedInterceptorPropertyNames, 1, 1) \
- F(DebugIndexedInterceptorElementNames, 1, 1) \
F(DebugNamedInterceptorPropertyValue, 2, 1) \
F(DebugIndexedInterceptorElementValue, 2, 1) \
F(CheckExecutionState, 1, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index 105749a..ce2f197 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -114,24 +114,33 @@
// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
// the result when either (or both) the operands are NaN.
function COMPARE(x, ncr) {
- // Fast case for numbers and strings.
- if (IS_NUMBER(this) && IS_NUMBER(x)) {
- return %NumberCompare(this, x, ncr);
- }
- if (IS_STRING(this) && IS_STRING(x)) {
- return %StringCompare(this, x);
+ var left;
+
+ // Fast cases for string, numbers and undefined compares.
+ if (IS_STRING(this)) {
+ if (IS_STRING(x)) return %_StringCompare(this, x);
+ if (IS_UNDEFINED(x)) return ncr;
+ left = this;
+ } else if (IS_NUMBER(this)) {
+ if (IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
+ if (IS_UNDEFINED(x)) return ncr;
+ left = this;
+ } else if (IS_UNDEFINED(this)) {
+ return ncr;
+ } else {
+ if (IS_UNDEFINED(x)) return ncr;
+ left = %ToPrimitive(this, NUMBER_HINT);
}
// Default implementation.
- var a = %ToPrimitive(this, NUMBER_HINT);
- var b = %ToPrimitive(x, NUMBER_HINT);
- if (IS_STRING(a) && IS_STRING(b)) {
- return %StringCompare(a, b);
+ var right = %ToPrimitive(x, NUMBER_HINT);
+ if (IS_STRING(left) && IS_STRING(right)) {
+ return %_StringCompare(left, right);
} else {
- var a_number = %ToNumber(a);
- var b_number = %ToNumber(b);
- if (NUMBER_IS_NAN(a_number) || NUMBER_IS_NAN(b_number)) return ncr;
- return %NumberCompare(a_number, b_number, ncr);
+ var left_number = %ToNumber(left);
+ var right_number = %ToNumber(right);
+ if (NUMBER_IS_NAN(left_number) || NUMBER_IS_NAN(right_number)) return ncr;
+ return %NumberCompare(left_number, right_number, ncr);
}
}
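
The reordering keeps the common monomorphic cases (string/string, number/number, and anything versus undefined) out of the generic ToPrimitive path; the observable contract is standard ECMA-262:

  1 < NaN;        // false (NaN makes the comparison return ncr)
  NaN <= NaN;     // false
  1 < undefined;  // false (undefined short-circuits to ncr)
  "a" < "b";      // true, string fast path
  2 < 10;         // true, number fast path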
@@ -468,6 +477,17 @@
}
+// Specialized version of String.charAt. It assumes string as
+// the receiver type and that the index is a number.
+function STRING_CHAR_AT(pos) {
+ var char_code = %_FastCharCodeAt(this, pos);
+ if (!%_IsSmi(char_code)) {
+ return %StringCharAt(this, pos);
+ }
+ return %CharFromCode(char_code);
+}
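
The shape here is a fast inline probe plus a runtime fallback: %_FastCharCodeAt yields a Smi in the common flat-string case and a non-Smi sentinel otherwise. A self-contained sketch of the same shape, with hypothetical helper names:

  // Hypothetical fast probe: a character code for in-range indices,
  // and a non-number sentinel otherwise.
  function fastCharCodeAt(s, pos) {
    return (pos >= 0 && pos < s.length) ? s.charCodeAt(pos) : null;
  }
  function stringCharAt(s, pos) {
    var code = fastCharCodeAt(s, pos);
    if (typeof code !== "number") return "";  // slow path: out of range
    return String.fromCharCode(code);
  }
  // stringCharAt("abc", 1) => "b";  stringCharAt("abc", 9) => ""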
+
+
/* -------------------------------------
- - - C o n v e r s i o n s - - -
-------------------------------------
diff --git a/src/scopes.cc b/src/scopes.cc
index 7da06cd..701e5e3 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -189,8 +189,7 @@
variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
false, Variable::THIS);
var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
- receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
- receiver_->BindTo(var);
+ receiver_ = var;
if (is_function_scope()) {
// Declare 'arguments' variable which exists in all functions.
@@ -237,7 +236,7 @@
Variable* Scope::DeclareGlobal(Handle<String> name) {
ASSERT(is_global_scope());
- return variables_.Declare(this, name, Variable::DYNAMIC, true,
+ return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL, true,
Variable::NORMAL);
}
diff --git a/src/scopes.h b/src/scopes.h
index fc627df..9b506d9 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -206,8 +206,13 @@
// ---------------------------------------------------------------------------
// Accessors.
- // The variable corresponding to the (function) receiver.
- VariableProxy* receiver() const { return receiver_; }
+ // A new variable proxy corresponding to the (function) receiver.
+ VariableProxy* receiver() const {
+ VariableProxy* proxy =
+ new VariableProxy(Factory::this_symbol(), true, false);
+ proxy->BindTo(receiver_);
+ return proxy;
+ }
// The variable holding the function literal for named function
// literals, or NULL.
@@ -314,7 +319,7 @@
// Declarations.
ZoneList<Declaration*> decls_;
// Convenience variable.
- VariableProxy* receiver_;
+ Variable* receiver_;
// Function variable, if any; function scopes only.
Variable* function_;
// Convenience variable; function scopes only.
diff --git a/src/serialize.cc b/src/serialize.cc
index 899e2e7..ec3a967 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -55,9 +55,8 @@
static int MappedTo(HeapObject* obj) {
ASSERT(IsMapped(obj));
- return reinterpret_cast<intptr_t>(serialization_map_->Lookup(Key(obj),
- Hash(obj),
- false)->value);
+ return static_cast<int>(reinterpret_cast<intptr_t>(
+ serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
}
static void Map(HeapObject* obj, int to) {
@@ -81,7 +80,7 @@
}
static uint32_t Hash(HeapObject* obj) {
- return reinterpret_cast<intptr_t>(obj->address());
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
}
static void* Key(HeapObject* obj) {
@@ -242,7 +241,7 @@
static const RefTableEntry ref_table[] = {
// Builtins
-#define DEF_ENTRY_C(name) \
+#define DEF_ENTRY_C(name, ignored) \
{ C_BUILTIN, \
Builtins::c_##name, \
"Builtins::" #name },
@@ -250,11 +249,11 @@
BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C
-#define DEF_ENTRY_C(name) \
+#define DEF_ENTRY_C(name, ignored) \
{ BUILTIN, \
Builtins::name, \
"Builtins::" #name },
-#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name)
+#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name, ignored)
BUILTIN_LIST_C(DEF_ENTRY_C)
BUILTIN_LIST_A(DEF_ENTRY_A)
@@ -397,10 +396,6 @@
"V8::RandomPositiveSmi");
// Miscellaneous
- Add(ExternalReference::builtin_passed_function().address(),
- UNCLASSIFIED,
- 1,
- "Builtins::builtin_passed_function");
Add(ExternalReference::the_hole_value_location().address(),
UNCLASSIFIED,
2,
@@ -484,7 +479,20 @@
UNCLASSIFIED,
21,
"NativeRegExpMacroAssembler::GrowStack()");
+ Add(ExternalReference::re_word_character_map().address(),
+ UNCLASSIFIED,
+ 22,
+ "NativeRegExpMacroAssembler::word_character_map");
#endif
+ // Keyed lookup cache.
+ Add(ExternalReference::keyed_lookup_cache_keys().address(),
+ UNCLASSIFIED,
+ 23,
+ "KeyedLookupCache::keys()");
+ Add(ExternalReference::keyed_lookup_cache_field_offsets().address(),
+ UNCLASSIFIED,
+ 24,
+ "KeyedLookupCache::field_offsets()");
}
@@ -550,11 +558,10 @@
bool Serializer::serialization_enabled_ = false;
bool Serializer::too_late_to_enable_now_ = false;
+ExternalReferenceDecoder* Deserializer::external_reference_decoder_ = NULL;
-Deserializer::Deserializer(SnapshotByteSource* source)
- : source_(source),
- external_reference_decoder_(NULL) {
+Deserializer::Deserializer(SnapshotByteSource* source) : source_(source) {
}
@@ -624,7 +631,7 @@
return HeapObject::FromAddress(pages_[space][0] + offset);
}
ASSERT(SpaceIsPaged(space));
- int page_of_pointee = offset >> Page::kPageSizeBits;
+ int page_of_pointee = offset >> kPageSizeBits;
Address object_address = pages_[space][page_of_pointee] +
(offset & Page::kPageAlignmentMask);
return HeapObject::FromAddress(object_address);
@@ -644,8 +651,26 @@
external_reference_decoder_ = new ExternalReferenceDecoder();
Heap::IterateRoots(this, VISIT_ONLY_STRONG);
ASSERT(source_->AtEOF());
- delete external_reference_decoder_;
- external_reference_decoder_ = NULL;
+}
+
+
+void Deserializer::DeserializePartial(Object** root) {
+ // Don't GC while deserializing - just expand the heap.
+ AlwaysAllocateScope always_allocate;
+ // Don't use the free lists while deserializing.
+ LinearAllocationScope allocate_linearly;
+ if (external_reference_decoder_ == NULL) {
+ external_reference_decoder_ = new ExternalReferenceDecoder();
+ }
+ VisitPointer(root);
+}
+
+
+void Deserializer::TearDown() {
+ if (external_reference_decoder_ != NULL) {
+ delete external_reference_decoder_;
+ external_reference_decoder_ = NULL;
+ }
}
@@ -672,6 +697,9 @@
*write_back = HeapObject::FromAddress(address);
Object** current = reinterpret_cast<Object**>(address);
Object** limit = current + (size >> kPointerSizeLog2);
+ if (FLAG_log_snapshot_positions) {
+ LOG(SnapshotPositionEvent(address, source_->position()));
+ }
ReadChunk(current, limit, space_number, address);
}
@@ -858,6 +886,11 @@
*current++ = reinterpret_cast<Object*>(resource);
break;
}
+ case ROOT_SERIALIZATION: {
+ int root_id = source_->GetInt();
+ *current++ = Heap::roots_address()[root_id];
+ break;
+ }
default:
UNREACHABLE();
}
@@ -910,7 +943,9 @@
Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
- external_reference_encoder_(NULL) {
+ external_reference_encoder_(NULL),
+ partial_(false),
+ large_object_total_(0) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
@@ -938,6 +973,16 @@
}
+void Serializer::SerializePartial(Object** object) {
+ partial_ = true;
+ external_reference_encoder_ = new ExternalReferenceEncoder();
+ this->VisitPointer(object);
+ delete external_reference_encoder_;
+ external_reference_encoder_ = NULL;
+ SerializationAddressMapper::Zap();
+}
+
+
void Serializer::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
@@ -953,19 +998,38 @@
}
+int Serializer::RootIndex(HeapObject* heap_object) {
+ for (int i = 0; i < Heap::kRootListLength; i++) {
+ Object* root = Heap::roots_address()[i];
+ if (root == heap_object) return i;
+ }
+ return kInvalidRootIndex;
+}
+
+
void Serializer::SerializeObject(
Object* o,
ReferenceRepresentation reference_representation) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
+ if (partial_) {
+ int root_index = RootIndex(heap_object);
+ if (root_index != kInvalidRootIndex) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ return;
+ }
+ // All the symbols that the snapshot needs should be in the root table.
+ ASSERT(!heap_object->IsSymbol());
+ }
if (SerializationAddressMapper::IsMapped(heap_object)) {
int space = SpaceOfAlreadySerializedObject(heap_object);
int address = SerializationAddressMapper::MappedTo(heap_object);
int offset = CurrentAllocationAddress(space) - address;
bool from_start = true;
if (SpaceIsPaged(space)) {
- if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) ==
- (address >> Page::kPageSizeBits)) {
+ if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
+ (address >> kPageSizeBits)) {
from_start = false;
address = offset;
}
@@ -1028,6 +1092,8 @@
}
sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
+ LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));
+
// Mark this object as already serialized.
bool start_new_page;
SerializationAddressMapper::Map(
@@ -1192,6 +1258,7 @@
// In large object space we merely number the objects instead of trying to
// determine some sort of address.
*new_page = true;
+ large_object_total_ += size;
return fullness_[LO_SPACE]++;
}
*new_page = false;
diff --git a/src/serialize.h b/src/serialize.h
index 96bd751..8dd193f 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -147,6 +147,8 @@
return position_ == length_;
}
+  int position() { return position_; }
+
private:
const byte* data_;
int length_;
@@ -199,7 +201,8 @@
SYNCHRONIZE = 36,
START_NEW_PAGE_SERIALIZATION = 37,
NATIVES_STRING_RESOURCE = 38,
- // Free: 39-47.
+ ROOT_SERIALIZATION = 39,
+ // Free: 40-47.
BACKREF_SERIALIZATION = 48,
// One per space, must be kSpaceMask aligned.
// Free: 57-63.
@@ -238,10 +241,16 @@
// Deserialize the snapshot into an empty heap.
void Deserialize();
+
+ // Deserialize a single object and the objects reachable from it.
+ void DeserializePartial(Object** root);
+
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
+ static void TearDown();
+
private:
virtual void VisitPointers(Object** start, Object** end);
@@ -266,7 +275,7 @@
List<Address> pages_[SerDes::kNumberOfSpaces];
SnapshotByteSource* source_;
- ExternalReferenceDecoder* external_reference_decoder_;
+ static ExternalReferenceDecoder* external_reference_decoder_;
// This is the address of the next object that will be allocated in each
// space. It is used to calculate the addresses of back-references.
Address high_water_[LAST_SPACE + 1];
@@ -287,16 +296,24 @@
Put(byte, description);
}
void PutInt(uintptr_t integer, const char* description);
+ virtual int Position() = 0;
};
class Serializer : public SerDes {
public:
explicit Serializer(SnapshotByteSink* sink);
- // Serialize the current state of the heap. This operation destroys the
- // heap contents.
+ // Serialize the current state of the heap.
void Serialize();
+ // Serialize a single object and the objects reachable from it.
+ void SerializePartial(Object** obj);
void VisitPointers(Object** start, Object** end);
+ // You can call this after serialization to find out how much space was used
+ // in each space.
+ int CurrentAllocationAddress(int space) {
+ if (SpaceIsLarge(space)) return large_object_total_;
+ return fullness_[space];
+ }
static void Enable() {
if (!serialization_enabled_) {
@@ -366,13 +383,11 @@
// once the map has been used for the serialization address.
static int SpaceOfAlreadySerializedObject(HeapObject* object);
int Allocate(int space, int size, bool* new_page_started);
- int CurrentAllocationAddress(int space) {
- if (SpaceIsLarge(space)) space = LO_SPACE;
- return fullness_[space];
- }
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
+ int RootIndex(HeapObject* heap_object);
+ static const int kInvalidRootIndex = -1;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
@@ -382,9 +397,11 @@
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
+ bool partial_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
+ int large_object_total_;
friend class ObjectSerializer;
friend class Deserializer;
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index c01baad..448c3fd 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -79,6 +79,9 @@
fputc(byte, fp_);
}
}
+ virtual int Position() {
+ return ftell(fp_);
+ }
private:
FILE* fp_;
diff --git a/src/spaces.cc b/src/spaces.cc
index f3b6b9f..cd09398 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -92,6 +92,7 @@
cur_addr_ = cur_page->ObjectAreaStart();
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
+ if (cur_addr_ == end_addr_) return false;
ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
Verify();
@@ -398,7 +399,7 @@
// start+size. Page::kPageSize is a power of two so we can divide by
// shifting.
return static_cast<int>((RoundDown(start + size, Page::kPageSize)
- - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits);
+ - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
}
@@ -412,7 +413,7 @@
if (size_ + static_cast<int>(chunk_size) > capacity_) {
// Request as many pages as we can.
chunk_size = capacity_ - size_;
- requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits);
+ requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
if (requested_pages <= 0) return Page::FromAddress(NULL);
}
@@ -1735,7 +1736,8 @@
Memory::Address_at(start + i) = kZapValue;
}
#endif
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
+ // We only use the freelists with mark-sweep.
+ ASSERT(!MarkCompactCollector::IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(object_size_);
node->set_next(head_);
@@ -1821,6 +1823,50 @@
}
+bool NewSpace::ReserveSpace(int bytes) {
+ // We can't reliably unpack a partial snapshot that needs more new space
+ // space than the minimum NewSpace size.
+ ASSERT(bytes <= InitialCapacity());
+ Address limit = allocation_info_.limit;
+ Address top = allocation_info_.top;
+ return limit - top >= bytes;
+}
+
+
+bool PagedSpace::ReserveSpace(int bytes) {
+ Address limit = allocation_info_.limit;
+ Address top = allocation_info_.top;
+ if (limit - top >= bytes) return true;
+
+ // There wasn't enough space in the current page. Lets put the rest
+ // of the page on the free list and start a fresh page.
+ PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
+
+ Page* reserved_page = TopPageOf(allocation_info_);
+ int bytes_left_to_reserve = bytes;
+ while (bytes_left_to_reserve > 0) {
+ if (!reserved_page->next_page()->is_valid()) {
+ if (Heap::OldGenerationAllocationLimitReached()) return false;
+ Expand(reserved_page);
+ }
+ bytes_left_to_reserve -= Page::kPageSize;
+ reserved_page = reserved_page->next_page();
+ if (!reserved_page->is_valid()) return false;
+ }
+ ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+ SetAllocationInfo(&allocation_info_,
+ TopPageOf(allocation_info_)->next_page());
+ return true;
+}
+
+
+// You have to call this last, since the implementation from PagedSpace
+// doesn't know that memory was 'promised' to large object space.
+bool LargeObjectSpace::ReserveSpace(int bytes) {
+ return Heap::OldGenerationSpaceAvailable() >= bytes;
+}
+
+
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
@@ -1864,19 +1910,37 @@
}
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
- int size_in_bytes) {
- ASSERT(current_page->next_page()->is_valid());
- // Add the block at the top of this page to the free list.
+void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
accounting_stats_.WasteBytes(wasted_bytes);
}
+}
+
+
+void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ int free_size =
+ static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
+ // In the fixed space free list all the free list items have the right size.
+ // We use up the rest of the page while preserving this invariant.
+ while (free_size >= object_size_in_bytes_) {
+ free_list_.Free(allocation_info_.top);
+ allocation_info_.top += object_size_in_bytes_;
+ free_size -= object_size_in_bytes_;
+ accounting_stats_.WasteBytes(object_size_in_bytes_);
+ }
+}
+
+
+// Add the block at the top of the page to the space's free list, set the
+// allocation info to the next page (assumed to be one), and allocate
+// linearly there.
+HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
+ int size_in_bytes) {
+ ASSERT(current_page->next_page()->is_valid());
+ PutRestOfCurrentPageOnFreeList(current_page);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
diff --git a/src/spaces.h b/src/spaces.h
index 75b992f..4786fb4 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -65,20 +65,23 @@
// Some assertion macros used in the debugging mode.
-#define ASSERT_PAGE_ALIGNED(address) \
+#define ASSERT_PAGE_ALIGNED(address) \
ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
-#define ASSERT_OBJECT_ALIGNED(address) \
+#define ASSERT_OBJECT_ALIGNED(address) \
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-#define ASSERT_OBJECT_SIZE(size) \
+#define ASSERT_MAP_ALIGNED(address) \
+ ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
+
+#define ASSERT_OBJECT_SIZE(size) \
ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
-#define ASSERT_PAGE_OFFSET(offset) \
- ASSERT((Page::kObjectStartOffset <= offset) \
+#define ASSERT_PAGE_OFFSET(offset) \
+ ASSERT((Page::kObjectStartOffset <= offset) \
&& (offset <= Page::kPageSize))
-#define ASSERT_MAP_PAGE_INDEX(index) \
+#define ASSERT_MAP_PAGE_INDEX(index) \
ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
@@ -106,11 +109,8 @@
// For this reason we add an offset to get room for the Page data at the start.
//
// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
-// 8K) in total. Because a map pointer is aligned to the pointer size (4
-// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
-// page index + 11 for the offset in the page) are required to encode a map
-// pointer.
+// page offset. The exact encoding is described in the comments for
+// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -212,9 +212,6 @@
static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif
- // 8K bytes per page.
- static const int kPageSizeBits = 13;
-
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -308,6 +305,14 @@
virtual void Print() = 0;
#endif
+ // After calling this we can allocate a certain number of bytes using only
+ // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
+ // without using freelists or causing a GC. This is used by partial
+  // snapshots. It returns true if space was reserved or false if a GC is
+  // needed. For paged spaces the space requested must include the space wasted
+  // at the end of each page when allocating linearly.
+ virtual bool ReserveSpace(int bytes) = 0;
+
private:
AllocationSpace id_;
Executability executable_;
@@ -514,7 +519,7 @@
#endif
// Due to encoding limitation, we can only have 8K chunks.
- static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
+ static const int kMaxNofChunks = 1 << kPageSizeBits;
// If a chunk has at least 16 pages, the maximum heap size is about
// 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
@@ -890,6 +895,10 @@
// collection.
inline Object* MCAllocateRaw(int size_in_bytes);
+ virtual bool ReserveSpace(int bytes);
+
+ // Used by ReserveSpace.
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
@@ -996,6 +1005,9 @@
HeapObject* SlowMCAllocateRaw(int size_in_bytes);
#ifdef DEBUG
+ // Returns the number of total pages in this space.
+ int CountTotalPages();
+
void DoPrintRSet(const char* space_name);
#endif
private:
@@ -1005,11 +1017,6 @@
// Returns a pointer to the page of the relocation pointer.
Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
-#ifdef DEBUG
- // Returns the number of total pages in this space.
- int CountTotalPages();
-#endif
-
friend class PageIterator;
};
@@ -1120,13 +1127,18 @@
return static_cast<int>(addr - low());
}
- // If we don't have this here then SemiSpace will be abstract. However
- // it should never be called.
+ // If we don't have these here then SemiSpace will be abstract. However
+ // they should never be called.
virtual int Size() {
UNREACHABLE();
return 0;
}
+ virtual bool ReserveSpace(int bytes) {
+ UNREACHABLE();
+ return false;
+ }
+
bool is_committed() { return committed_; }
bool Commit();
bool Uncommit();
@@ -1350,6 +1362,8 @@
bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
+ virtual bool ReserveSpace(int bytes);
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
virtual void Protect();
@@ -1636,6 +1650,8 @@
// collection.
virtual void MCCommitRelocationInfo();
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
@@ -1697,6 +1713,8 @@
// collection.
virtual void MCCommitRelocationInfo();
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
@@ -1713,6 +1731,10 @@
// the page after current_page (there is assumed to be one).
HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+ void ResetFreeList() {
+ free_list_.Reset();
+ }
+
private:
// The size of objects in this space.
int object_size_in_bytes_;
@@ -1743,12 +1765,81 @@
// Constants.
static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+ // Are map pointers encodable into map word?
+ bool MapPointersEncodable() {
+ if (!FLAG_use_big_map_space) {
+ ASSERT(CountTotalPages() <= kMaxMapPageIndex);
+ return true;
+ }
+ int n_of_pages = Capacity() / Page::kObjectAreaSize;
+ ASSERT(n_of_pages == CountTotalPages());
+ return n_of_pages <= kMaxMapPageIndex;
+ }
+
+ // Should be called after forced sweep to find out if map space needs
+ // compaction.
+ bool NeedsCompaction(int live_maps) {
+ return !MapPointersEncodable() && live_maps <= kCompactionThreshold;
+ }
+
+ Address TopAfterCompaction(int live_maps) {
+ ASSERT(NeedsCompaction(live_maps));
+
+ int pages_left = live_maps / kMapsPerPage;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (pages_left-- > 0) {
+ ASSERT(it.has_next());
+ it.next()->ClearRSet();
+ }
+ ASSERT(it.has_next());
+ Page* top_page = it.next();
+ top_page->ClearRSet();
+ ASSERT(top_page->is_valid());
+
+ int offset = live_maps % kMapsPerPage * Map::kSize;
+ Address top = top_page->ObjectAreaStart() + offset;
+ ASSERT(top < top_page->ObjectAreaEnd());
+ ASSERT(Contains(top));
+
+ return top;
+ }
+
+ void FinishCompaction(Address new_top, int live_maps) {
+ Page* top_page = Page::FromAddress(new_top);
+ ASSERT(top_page->is_valid());
+
+ SetAllocationInfo(&allocation_info_, top_page);
+ allocation_info_.top = new_top;
+
+ int new_size = live_maps * Map::kSize;
+ accounting_stats_.DeallocateBytes(accounting_stats_.Size());
+ accounting_stats_.AllocateBytes(new_size);
+
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ int actual_size = 0;
+ for (Page* p = first_page_; p != top_page; p = p->next_page())
+ actual_size += kMapsPerPage * Map::kSize;
+ actual_size += (new_top - top_page->ObjectAreaStart());
+ ASSERT(accounting_stats_.Size() == actual_size);
+ }
+#endif
+
+ Shrink();
+ ResetFreeList();
+ }
+
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
private:
+ static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+
+ // Do map space compaction if there is a page gap.
+ static const int kCompactionThreshold = kMapsPerPage * (kMaxMapPageIndex - 1);
+
// An array of page start address in a map space.
Address page_addresses_[kMaxMapPageIndex + 1];
@@ -1893,6 +1984,11 @@
// Checks whether the space is empty.
bool IsEmpty() { return first_chunk_ == NULL; }
+ // See the comments for ReserveSpace in the Space class. This has to be
+ // called after ReserveSpace has been called on the paged spaces, since they
+ // may use some memory, leaving less for large objects.
+ virtual bool ReserveSpace(int bytes);
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
void Protect();
diff --git a/src/string.js b/src/string.js
index 4f9957a..ed938ec 100644
--- a/src/string.js
+++ b/src/string.js
@@ -87,12 +87,14 @@
// ECMA-262, section 15.5.4.6
function StringConcat() {
- var len = %_ArgumentsLength();
- var parts = new $Array(len + 1);
- parts[0] = ToString(this);
- for (var i = 0; i < len; i++)
- parts[i + 1] = ToString(%_Arguments(i));
- return parts.join('');
+ var len = %_ArgumentsLength() + 1;
+ var parts = new $Array(len);
+ parts[0] = IS_STRING(this) ? this : ToString(this);
+ for (var i = 1; i < len; i++) {
+ var part = %_Arguments(i - 1);
+ parts[i] = IS_STRING(part) ? part : ToString(part);
+ }
+ return %StringBuilderConcat(parts, len, "");
}
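
The rewritten StringConcat skips Array.prototype.join and passes the parts array, its length, and an empty separator directly to %StringBuilderConcat, matching the runtime function's new three-argument signature. Observable behavior is unchanged:

  "ab".concat("cd", 5, "ef");  // "abcd5ef", non-strings still go through ToString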
// Match ES3 and Safari
@@ -180,7 +182,7 @@
}
return %CharFromCode(char_code);
}
- return %SubString(string, start, end);
+ return %_SubString(string, start, end);
}
@@ -194,7 +196,7 @@
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
- var subject = ToString(this);
+ var subject = IS_STRING(this) ? this : ToString(this);
// Delegate to one of the regular expression variants if necessary.
if (IS_REGEXP(search)) {
@@ -207,7 +209,7 @@
}
// Convert the search argument to a string and search for it.
- search = ToString(search);
+ search = IS_STRING(search) ? search : ToString(search);
var start = %StringIndexOf(subject, search, 0);
if (start < 0) return subject;
var end = start + search.length;
@@ -222,7 +224,8 @@
} else {
reusableMatchInfo[CAPTURE0] = start;
reusableMatchInfo[CAPTURE1] = end;
- ExpandReplacement(ToString(replace), subject, reusableMatchInfo, builder);
+ if (!IS_STRING(replace)) replace = ToString(replace);
+ ExpandReplacement(replace, subject, reusableMatchInfo, builder);
}
// suffix
@@ -505,7 +508,7 @@
// ECMA-262 section 15.5.4.14
function StringSplit(separator, limit) {
var subject = ToString(this);
- limit = (limit === void 0) ? 0xffffffff : ToUint32(limit);
+ limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
if (limit === 0) return [];
// ECMA-262 says that if separator is undefined, the result should
@@ -604,22 +607,30 @@
// ECMA-262 section 15.5.4.15
function StringSubstring(start, end) {
- var s = ToString(this);
+ var s = this;
+ if (!IS_STRING(s)) s = ToString(s);
var s_len = s.length;
+
var start_i = TO_INTEGER(start);
+ if (start_i < 0) {
+ start_i = 0;
+ } else if (start_i > s_len) {
+ start_i = s_len;
+ }
+
var end_i = s_len;
- if (!IS_UNDEFINED(end))
+ if (!IS_UNDEFINED(end)) {
end_i = TO_INTEGER(end);
-
- if (start_i < 0) start_i = 0;
- if (start_i > s_len) start_i = s_len;
- if (end_i < 0) end_i = 0;
- if (end_i > s_len) end_i = s_len;
-
- if (start_i > end_i) {
- var tmp = end_i;
- end_i = start_i;
- start_i = tmp;
+ if (end_i > s_len) {
+ end_i = s_len;
+ } else {
+ if (end_i < 0) end_i = 0;
+ if (start_i > end_i) {
+ var tmp = end_i;
+ end_i = start_i;
+ start_i = tmp;
+ }
+ }
}
return SubString(s, start_i, end_i);
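
The restructured clamping preserves ECMA-262 section 15.5.4.15 semantics while avoiding redundant checks in the common case: both indices clamp to [0, length] and swap when out of order. For example:

  "hello".substring(1, 3);    // "el"
  "hello".substring(3, 1);    // "el" (indices swap)
  "hello".substring(-5, 99);  // "hello" (both ends clamp)
  "hello".substring(2);       // "llo" (end defaults to the string length)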
@@ -790,21 +801,14 @@
}
-// StringBuilder support.
-
-function StringBuilder() {
- this.elements = new $Array();
-}
-
-
+// ReplaceResultBuilder support.
function ReplaceResultBuilder(str) {
this.elements = new $Array();
this.special_string = str;
}
-ReplaceResultBuilder.prototype.add =
-StringBuilder.prototype.add = function(str) {
+ReplaceResultBuilder.prototype.add = function(str) {
if (!IS_STRING(str)) str = ToString(str);
if (str.length > 0) {
var elements = this.elements;
@@ -828,13 +832,9 @@
}
-StringBuilder.prototype.generate = function() {
- return %StringBuilderConcat(this.elements, "");
-}
-
-
ReplaceResultBuilder.prototype.generate = function() {
- return %StringBuilderConcat(this.elements, this.special_string);
+ var elements = this.elements;
+ return %StringBuilderConcat(elements, elements.length, this.special_string);
}
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 51d9ddb..9ab83be 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -120,7 +120,7 @@
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
- code = compiler.CompileLoadCallback(receiver, holder, callback, name);
+ code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code;
LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@@ -831,7 +831,7 @@
// can't use either LoadIC or KeyedLoadIC constructors.
IC ic(IC::NO_EXTRA_FRAME);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.is_contextual()) return Heap::undefined_value();
+ if (!ic.SlowIsContextual()) return Heap::undefined_value();
// Throw a reference error.
HandleScope scope;
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 788c532..2418c1f 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -405,7 +405,7 @@
String* name,
Label* miss);
- void GenerateLoadCallback(JSObject* object,
+ bool GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -413,7 +413,8 @@
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss);
+ Label* miss,
+ Failure** failure);
void GenerateLoadConstant(JSObject* object,
JSObject* holder,
@@ -447,10 +448,10 @@
JSObject* holder,
int index,
String* name);
- Object* CompileLoadCallback(JSObject* object,
+ Object* CompileLoadCallback(String* name,
+ JSObject* object,
JSObject* holder,
- AccessorInfo* callback,
- String* name);
+ AccessorInfo* callback);
Object* CompileLoadConstant(JSObject* object,
JSObject* holder,
Object* value,
diff --git a/src/token.cc b/src/token.cc
index 0a4ad4c..8cee99b 100644
--- a/src/token.cc
+++ b/src/token.cc
@@ -32,13 +32,11 @@
namespace v8 {
namespace internal {
-#ifdef DEBUG
#define T(name, string, precedence) #name,
const char* Token::name_[NUM_TOKENS] = {
TOKEN_LIST(T, T, IGNORE_TOKEN)
};
#undef T
-#endif
#define T(name, string, precedence) string,
diff --git a/src/token.h b/src/token.h
index a60704c..2a228d6 100644
--- a/src/token.h
+++ b/src/token.h
@@ -66,8 +66,9 @@
T(DEC, "--", 0) \
\
/* Assignment operators. */ \
- /* IsAssignmentOp() relies on this block of enum values */ \
- /* being contiguous and sorted in the same order! */ \
+ /* IsAssignmentOp() and Assignment::is_compound() relies on */ \
+ /* this block of enum values being contiguous and sorted in the */ \
+ /* same order! */ \
T(INIT_VAR, "=init_var", 2) /* AST-use only. */ \
T(INIT_CONST, "=init_const", 2) /* AST-use only. */ \
T(ASSIGN, "=", 2) \
@@ -211,14 +212,12 @@
};
#undef T
-#ifdef DEBUG
// Returns a string corresponding to the C++ token name
// (e.g. "LT" for the token LT).
static const char* Name(Value tok) {
ASSERT(0 <= tok && tok < NUM_TOKENS);
return name_[tok];
}
-#endif
// Predicates
static bool IsAssignmentOp(Value tok) {
@@ -261,9 +260,7 @@
}
private:
-#ifdef DEBUG
static const char* name_[NUM_TOKENS];
-#endif
static const char* string_[NUM_TOKENS];
static int8_t precedence_[NUM_TOKENS];
};
diff --git a/src/utils.cc b/src/utils.cc
index 08ee16f..374385b 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -40,6 +40,7 @@
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
uint32_t RoundUpToPowerOf2(uint32_t x) {
+ ASSERT(x <= 0x80000000u);
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
diff --git a/src/v8-counters.h b/src/v8-counters.h
index d6f53fa..fb1e926 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -74,8 +74,6 @@
SC(objs_since_last_full, V8.ObjsSinceLastFull) \
SC(symbol_table_capacity, V8.SymbolTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
- /* Current amount of memory in external string buffers. */ \
- SC(total_external_string_memory, V8.TotalExternalStringMemory) \
SC(script_wrappers, V8.ScriptWrappers) \
SC(call_initialize_stubs, V8.CallInitializeStubs) \
SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
@@ -155,7 +153,13 @@
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
SC(string_add_runtime, V8.StringAddRuntime) \
- SC(string_add_native, V8.StringAddNative)
+ SC(string_add_native, V8.StringAddNative) \
+ SC(sub_string_runtime, V8.SubStringRuntime) \
+ SC(sub_string_native, V8.SubStringNative) \
+ SC(string_compare_native, V8.StringCompareNative) \
+ SC(string_compare_runtime, V8.StringCompareRuntime) \
+ SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
+ SC(regexp_entry_native, V8.RegExpEntryNative)
// This file contains all the v8 counters that are in use.
class Counters : AllStatic {
diff --git a/src/v8.cc b/src/v8.cc
index 3bec827..db570a4 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -146,6 +146,7 @@
Heap::TearDown();
Logger::TearDown();
+ Deserializer::TearDown();
is_running_ = false;
has_been_disposed_ = true;
diff --git a/src/v8natives.js b/src/v8natives.js
index 8f9adcb..3dcf430 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -41,6 +41,7 @@
const $isNaN = GlobalIsNaN;
const $isFinite = GlobalIsFinite;
+
// ----------------------------------------------------------------------------
@@ -87,7 +88,7 @@
// ECMA-262 - 15.1.2.2
function GlobalParseInt(string, radix) {
- if (radix === void 0) {
+ if (IS_UNDEFINED(radix)) {
// Some people use parseInt instead of Math.floor. This
// optimization makes parseInt on a Smi 12 times faster (60ns
// vs 800ns). The following optimization makes parseInt on a
@@ -275,11 +276,310 @@
function ObjectKeys(obj) {
if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
- throw MakeTypeError('object_keys_non_object', [obj]);
+ throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
return %LocalKeys(obj);
}
+// ES5 8.10.1.
+function IsAccessorDescriptor(desc) {
+ if (IS_UNDEFINED(desc)) return false;
+ return desc.hasGetter_ || desc.hasSetter_;
+}
+
+
+// ES5 8.10.2.
+function IsDataDescriptor(desc) {
+ if (IS_UNDEFINED(desc)) return false;
+ return desc.hasValue_ || desc.hasWritable_;
+}
+
+
+// ES5 8.10.3.
+function IsGenericDescriptor(desc) {
+ return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
+}
+
+
+function IsInconsistentDescriptor(desc) {
+ return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
+}
+
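
The four predicates above form a small algebra over partially filled
descriptors. A host-side C++ model of the same rules, with invented names:

    // A descriptor is "data" if it carries value/writable, "accessor" if it
    // carries get/set, "generic" if neither, and inconsistent if both; the
    // inconsistent case is rejected by ToPropertyDescriptor below.
    struct DescriptorModel {
      bool has_value = false;
      bool has_writable = false;
      bool has_getter = false;
      bool has_setter = false;
    };

    bool IsData(const DescriptorModel& d) { return d.has_value || d.has_writable; }
    bool IsAccessor(const DescriptorModel& d) { return d.has_getter || d.has_setter; }
    bool IsGeneric(const DescriptorModel& d) { return !IsData(d) && !IsAccessor(d); }
    bool IsInconsistent(const DescriptorModel& d) { return IsData(d) && IsAccessor(d); }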
+// ES5 8.10.4
+function FromPropertyDescriptor(desc) {
+ if (IS_UNDEFINED(desc)) return desc;
+ var obj = new $Object();
+ if (IsDataDescriptor(desc)) {
+ obj.value = desc.getValue();
+ obj.writable = desc.isWritable();
+ }
+ if (IsAccessorDescriptor(desc)) {
+ obj.get = desc.getGet();
+ obj.set = desc.getSet();
+ }
+ obj.enumerable = desc.isEnumerable();
+ obj.configurable = desc.isConfigurable();
+ return obj;
+}
+
+// ES5 8.10.5.
+function ToPropertyDescriptor(obj) {
+ if (!IS_OBJECT(obj)) {
+ throw MakeTypeError("property_desc_object", [obj]);
+ }
+ var desc = new PropertyDescriptor();
+
+ if ("enumerable" in obj) {
+ desc.setEnumerable(ToBoolean(obj.enumerable));
+ }
+
+ if ("configurable" in obj) {
+ desc.setConfigurable(ToBoolean(obj.configurable));
+ }
+
+ if ("value" in obj) {
+ desc.setValue(obj.value);
+ }
+
+ if ("writable" in obj) {
+ desc.setWritable(ToBoolean(obj.writable));
+ }
+
+ if ("get" in obj) {
+ var get = obj.get;
+ if (!IS_UNDEFINED(get) && !IS_FUNCTION(get)) {
+ throw MakeTypeError("getter_must_be_callable", [get]);
+ }
+ desc.setGet(get);
+ }
+
+ if ("set" in obj) {
+ var set = obj.set;
+ if (!IS_UNDEFINED(set) && !IS_FUNCTION(set)) {
+ throw MakeTypeError("setter_must_be_callable", [set]);
+ }
+ desc.setSet(set);
+ }
+
+ if (IsInconsistentDescriptor(desc)) {
+ throw MakeTypeError("value_and_accessor", [obj]);
+ }
+ return desc;
+}
+
+
+function PropertyDescriptor() {
+ // Initialize here so they are all in-object and have the same map.
+ // Default values from ES5 8.6.1.
+ this.value_ = void 0;
+ this.hasValue_ = false;
+ this.writable_ = false;
+ this.hasWritable_ = false;
+ this.enumerable_ = false;
+ this.configurable_ = false;
+ this.get_ = void 0;
+ this.hasGetter_ = false;
+ this.set_ = void 0;
+ this.hasSetter_ = false;
+}
+
+
+PropertyDescriptor.prototype.setValue = function(value) {
+ this.value_ = value;
+ this.hasValue_ = true;
+}
+
+
+PropertyDescriptor.prototype.getValue = function() {
+ return this.value_;
+}
+
+
+PropertyDescriptor.prototype.setEnumerable = function(enumerable) {
+ this.enumerable_ = enumerable;
+}
+
+
+PropertyDescriptor.prototype.isEnumerable = function() {
+ return this.enumerable_;
+}
+
+
+PropertyDescriptor.prototype.setWritable = function(writable) {
+ this.writable_ = writable;
+ this.hasWritable_ = true;
+}
+
+
+PropertyDescriptor.prototype.isWritable = function() {
+ return this.writable_;
+}
+
+
+PropertyDescriptor.prototype.setConfigurable = function(configurable) {
+ this.configurable_ = configurable;
+}
+
+
+PropertyDescriptor.prototype.isConfigurable = function() {
+ return this.configurable_;
+}
+
+
+PropertyDescriptor.prototype.setGet = function(get) {
+ this.get_ = get;
+ this.hasGetter_ = true;
+}
+
+
+PropertyDescriptor.prototype.getGet = function() {
+ return this.get_;
+}
+
+
+PropertyDescriptor.prototype.setSet = function(set) {
+ this.set_ = set;
+ this.hasSetter_ = true;
+}
+
+
+PropertyDescriptor.prototype.getSet = function() {
+ return this.set_;
+}
+
+
+// ES5 section 8.12.1.
+function GetOwnProperty(obj, p) {
+ var desc = new PropertyDescriptor();
+
+ // An array with:
+ // if obj is a data property: [false, value, Writable, Enumerable, Configurable]
+ // if obj is an accessor: [true, Get, Set, Enumerable, Configurable]
+ var props = %GetOwnProperty(ToObject(obj), ToString(p));
+
+ if (IS_UNDEFINED(props))
+ return void 0;
+
+ // This is an accessor
+ if (props[0]) {
+ desc.setGet(props[1]);
+ desc.setSet(props[2]);
+ } else {
+ desc.setValue(props[1]);
+ desc.setWritable(props[2]);
+ }
+ desc.setEnumerable(props[3]);
+ desc.setConfigurable(props[4]);
+
+ return desc;
+}
+
+
+// ES5 8.12.9. This version cannot cope with the property p already
+// being present on obj.
+function DefineOwnProperty(obj, p, desc, should_throw) {
+ var flag = desc.isEnumerable() ? 0 : DONT_ENUM;
+ if (IsDataDescriptor(desc)) {
+ flag |= desc.isWritable() ? 0 : (DONT_DELETE | READ_ONLY);
+ %SetProperty(obj, p, desc.getValue(), flag);
+ } else {
+ if (IS_FUNCTION(desc.getGet())) %DefineAccessor(obj, p, GETTER, desc.getGet(), flag);
+ if (IS_FUNCTION(desc.getSet())) %DefineAccessor(obj, p, SETTER, desc.getSet(), flag);
+ }
+ return true;
+}
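
In host terms, the flag computation above maps descriptor fields onto V8
property attributes roughly as follows; the attribute values are an
assumption of this sketch, not taken from this diff:

    // Hypothetical constants standing in for v8's PropertyAttributes.
    enum Attr { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4 };

    int AttributesForSketch(bool enumerable, bool is_data, bool writable) {
      int flag = enumerable ? NONE : DONT_ENUM;
      // A non-writable data property is pinned hard, as in the code above.
      if (is_data && !writable) flag |= DONT_DELETE | READ_ONLY;
      return flag;
    }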
+
+
+// ES5 section 15.2.3.2.
+function ObjectGetPrototypeOf(obj) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
+ return obj.__proto__;
+}
+
+
+// ES5 section 15.2.3.3
+function ObjectGetOwnPropertyDescriptor(obj, p) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
+ var desc = GetOwnProperty(obj, p);
+ return FromPropertyDescriptor(desc);
+}
+
+
+// ES5 section 15.2.3.4.
+function ObjectGetOwnPropertyNames(obj) {
+ if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj))
+ throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
+
+ // Find all the indexed properties.
+
+ // Get the local element names.
+ var propertyNames = %GetLocalElementNames(obj);
+
+ // Get names for indexed interceptor properties.
+ if (%GetInterceptorInfo(obj) & 1) {
+ var indexedInterceptorNames =
+ %GetIndexedInterceptorElementNames(obj);
+ if (indexedInterceptorNames) {
+ propertyNames = propertyNames.concat(indexedInterceptorNames);
+ }
+ }
+
+ // Find all the named properties.
+
+ // Get the local property names.
+ propertyNames = propertyNames.concat(%GetLocalPropertyNames(obj));
+
+ // Get names for named interceptor properties if any.
+ if (%GetInterceptorInfo(obj) & 2) {
+ var namedInterceptorNames =
+ %GetNamedInterceptorPropertyNames(obj);
+ if (namedInterceptorNames) {
+ propertyNames = propertyNames.concat(namedInterceptorNames);
+ }
+ }
+
+ return propertyNames;
+}
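
A compact model of the collection order above, assuming each %-runtime call
returns a (possibly empty) batch of names; the helper is invented:

    #include <string>
    #include <vector>

    std::vector<std::string> CollectOwnNamesSketch(
        const std::vector<std::string>& elements,            // local elements
        const std::vector<std::string>& indexed_interceptor, // info bit 1 set
        const std::vector<std::string>& named,               // local properties
        const std::vector<std::string>& named_interceptor) { // info bit 2 set
      std::vector<std::string> out(elements);
      out.insert(out.end(), indexed_interceptor.begin(), indexed_interceptor.end());
      out.insert(out.end(), named.begin(), named.end());
      out.insert(out.end(), named_interceptor.begin(), named_interceptor.end());
      return out;
    }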
+
+
+// ES5 section 15.2.3.5.
+function ObjectCreate(proto, properties) {
+ if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
+ throw MakeTypeError("proto_object_or_null", [proto]);
+ }
+ var obj = new $Object();
+ obj.__proto__ = proto;
+ if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
+ return obj;
+}
+
+
+// ES5 section 15.2.3.7. This version cannot cope with the properties already
+// being present on obj. Therefore it is not exposed as
+// Object.defineProperties yet.
+function ObjectDefineProperties(obj, properties) {
+ var props = ToObject(properties);
+ var key_values = [];
+ for (var key in props) {
+ if (%HasLocalProperty(props, key)) {
+ key_values.push(key);
+ var value = props[key];
+ var desc = ToPropertyDescriptor(value);
+ key_values.push(desc);
+ }
+ }
+ for (var i = 0; i < key_values.length; i += 2) {
+ var key = key_values[i];
+ var desc = key_values[i + 1];
+ DefineOwnProperty(obj, key, desc, true);
+ }
+}
+
+
%SetCode($Object, function(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
@@ -309,7 +609,11 @@
"__lookupSetter__", ObjectLookupSetter
));
InstallFunctions($Object, DONT_ENUM, $Array(
- "keys", ObjectKeys
+ "keys", ObjectKeys,
+ "create", ObjectCreate,
+ "getPrototypeOf", ObjectGetPrototypeOf,
+ "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
+ "getOwnPropertyNames", ObjectGetOwnPropertyNames
));
}
diff --git a/src/variables.cc b/src/variables.cc
index d9a78a5..3bcd48a 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -86,10 +86,10 @@
// ----------------------------------------------------------------------------
-// Implementation SmiAnalysis.
+// Implementation of StaticType.
-const char* SmiAnalysis::Type2String(SmiAnalysis* type) {
+const char* StaticType::Type2String(StaticType* type) {
switch (type->kind_) {
case UNKNOWN:
return "UNKNOWN";
diff --git a/src/variables.h b/src/variables.h
index ca78b5f..ac7f294 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -65,14 +65,14 @@
// Variables and AST expression nodes can track their "type" to enable
// optimizations and removal of redundant checks when generating code.
-class SmiAnalysis {
+class StaticType {
public:
enum Kind {
UNKNOWN,
LIKELY_SMI
};
- SmiAnalysis() : kind_(UNKNOWN) {}
+ StaticType() : kind_(UNKNOWN) {}
bool Is(Kind kind) const { return kind_ == kind; }
@@ -80,11 +80,11 @@
bool IsUnknown() const { return Is(UNKNOWN); }
bool IsLikelySmi() const { return Is(LIKELY_SMI); }
- void CopyFrom(SmiAnalysis* other) {
+ void CopyFrom(StaticType* other) {
kind_ = other->kind_;
}
- static const char* Type2String(SmiAnalysis* type);
+ static const char* Type2String(StaticType* type);
// LIKELY_SMI accessors
void SetAsLikelySmi() {
@@ -100,7 +100,7 @@
private:
Kind kind_;
- DISALLOW_COPY_AND_ASSIGN(SmiAnalysis);
+ DISALLOW_COPY_AND_ASSIGN(StaticType);
};
@@ -203,7 +203,7 @@
Expression* rewrite() const { return rewrite_; }
Slot* slot() const;
- SmiAnalysis* type() { return &type_; }
+ StaticType* type() { return &type_; }
private:
Scope* scope_;
@@ -220,7 +220,7 @@
UseCount obj_uses_; // uses of the object the variable points to
// Static type information
- SmiAnalysis type_;
+ StaticType type_;
// Code generation.
// rewrite_ is usually a Slot or a Property, but may be any expression.
diff --git a/src/version.cc b/src/version.cc
index 3611d44..2724f6e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 0
-#define BUILD_NUMBER 4
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION true
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 2d524ea..4ac3933 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1880,6 +1880,20 @@
}
+void Assembler::testb(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg, op);
+ } else {
+ emit_optional_rex_32(reg, op);
+ }
+ emit(0x84);
+ emit_operand(reg, op);
+}
+
+
void Assembler::testl(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
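
The REX special case in the new testb overload exists because, without a REX
prefix, byte-register codes 4..7 select ah/ch/dh/bh rather than
spl/bpl/sil/dil. A simplified encoder sketch (names and the flattened operand
handling are invented):

    #include <cstdint>
    #include <vector>

    void EmitTestbSketch(std::vector<uint8_t>* code, int reg_code,
                         uint8_t modrm) {
      if (reg_code > 3) {
        // Force a REX prefix so codes 4..7 mean spl/bpl/sil/dil, and set
        // REX.R for the extended registers r8b..r15b.
        code->push_back(static_cast<uint8_t>(0x40 | (((reg_code >> 3) & 1) << 2)));
      }
      code->push_back(0x84);   // TEST r/m8, r8
      code->push_back(modrm);  // ModRM/SIB/displacement elided in the sketch
    }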
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index fa7d33b..1bddb2f 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -916,6 +916,10 @@
arithmetic_op_32(0x2B, dst, src);
}
+ void subl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x2B, dst, src);
+ }
+
void subl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
@@ -931,6 +935,7 @@
void testb(Register dst, Register src);
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
+ void testb(const Operand& op, Register reg);
void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
void testl(const Operand& op, Immediate mask);
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index f444d2c..0b95bba 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -34,16 +34,36 @@
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
- // TODO(428): Don't pass the function in a static variable.
- ExternalReference passed = ExternalReference::builtin_passed_function();
- __ movq(kScratchRegister, passed.address(), RelocInfo::EXTERNAL_REFERENCE);
- __ movq(Operand(kScratchRegister, 0), rdi);
- // The actual argument count has already been loaded into register
- // rax, but JumpToRuntime expects rax to contain the number of
- // arguments including the receiver.
- __ incq(rax);
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ CFunctionId id,
+ BuiltinExtraArguments extra_args) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments excluding receiver
+ // -- rdi : called function (only guaranteed when
+ // extra_args requires it)
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -- ...
+ // -- rsp[8 * argc] : first argument (argc == rax)
+ // -- rsp[8 * (argc + 1)] : receiver
+ // -----------------------------------
+
+ // Insert extra arguments.
+ int num_extra_args = 0;
+ if (extra_args == NEEDS_CALLED_FUNCTION) {
+ num_extra_args = 1;
+ __ pop(kScratchRegister); // Save return address.
+ __ push(rdi);
+ __ push(kScratchRegister); // Restore return address.
+ } else {
+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+ }
+
+ // JumpToRuntime expects rax to contain the number of arguments
+ // including the receiver and the extra arguments.
+ __ addq(rax, Immediate(num_extra_args + 1));
__ JumpToRuntime(ExternalReference(id), 1);
}
@@ -888,7 +908,8 @@
}
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function) {
// Enter a construct frame.
__ EnterConstructFrame();
@@ -1091,8 +1112,17 @@
__ j(greater_equal, &loop);
// Call the function.
- ParameterCount actual(rax);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ if (is_api_function) {
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ Handle<Code> code = Handle<Code>(
+ Builtins::builtin(Builtins::HandleApiCallConstruct));
+ ParameterCount expected(0);
+ __ InvokeCode(code, expected, expected,
+ RelocInfo::CODE_TARGET, CALL_FUNCTION);
+ } else {
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ }
// Restore context from the frame.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1129,6 +1159,16 @@
}
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, true);
+}
+
+
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Expects five C++ function parameters.
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
index 6869fc9..60e9ab0 100644
--- a/src/x64/codegen-x64-inl.h
+++ b/src/x64/codegen-x64-inl.h
@@ -39,16 +39,6 @@
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- GenerateFastMathOp(SIN, args);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- GenerateFastMathOp(COS, args);
-}
-
#undef __
} } // namespace v8::internal
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 36f0e63..0cf68eb 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -326,12 +326,19 @@
function_return_is_shadowed_ = false;
// Allocate the local context if needed.
- if (scope_->num_heap_slots() > 0) {
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
// Get outer context and create a new context based on it.
frame_->PushFunction();
- Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
// Update context local.
frame_->SaveContextRegister();
@@ -393,6 +400,12 @@
StoreArgumentsObject(true);
}
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -802,7 +815,7 @@
frame_->Push(&fn);
frame_->Push(&a1);
frame_->Push(&a2);
- CallFunctionStub call_function(2, NOT_IN_LOOP);
+ CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
Result res = frame_->CallStub(&call_function, 3);
frame_->Push(&res);
@@ -1865,13 +1878,9 @@
frame_->EmitPush(rax);
// Store the caught exception in the catch variable.
- { Reference ref(this, node->catch_var());
- ASSERT(ref.is_slot());
- // Load the exception to the top of the stack. Here we make use of the
- // convenient property that it doesn't matter whether a value is
- // immediately on top of or underneath a zero-sized reference.
- ref.SetValue(NOT_CONST_INIT);
- }
+ Variable* catch_var = node->catch_var()->var();
+ ASSERT(catch_var != NULL && catch_var->slot() != NULL);
+ StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -2196,19 +2205,28 @@
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
- // Call the runtime to instantiate the function boilerplate object.
+ ASSERT(boilerplate->IsBoilerplate());
+
// The inevitable call will sync frame elements to memory anyway, so
// we do it eagerly to allow us to push the arguments directly into
// place.
- ASSERT(boilerplate->IsBoilerplate());
frame_->SyncRange(0, frame_->element_count() - 1);
- // Create a new closure.
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(&result);
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+ FastNewClosureStub stub;
+ frame_->Push(boilerplate);
+ Result answer = frame_->CallStub(&stub, 1);
+ frame_->Push(&answer);
+ } else {
+ // Call the runtime to instantiate the function boilerplate
+ // object.
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(boilerplate);
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
+ }
}
@@ -2362,46 +2380,10 @@
}
-// Materialize the object literal 'node' in the literals array
-// 'literals' of the function. Leave the object boilerplate in
-// 'boilerplate'.
-class DeferredObjectLiteral: public DeferredCode {
- public:
- DeferredObjectLiteral(Register boilerplate,
- Register literals,
- ObjectLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredObjectLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ObjectLiteral* node_;
-};
-
-
-void DeferredObjectLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // Constant properties (2).
- __ Push(node_->constant_properties());
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -2411,32 +2393,18 @@
// Load the literals array of the function.
__ movq(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredObjectLiteral* deferred =
- new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the boilerplate object.
- frame_->Push(&boilerplate);
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ // Literal array.
+ frame_->Push(&literals);
+ // Literal index.
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ // Constant properties.
+ frame_->Push(node->constant_properties());
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- Result clone = frame_->CallRuntime(clone_function_id, 1);
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
for (int i = 0; i < node->properties()->length(); i++) {
@@ -2496,45 +2464,10 @@
}
-// Materialize the array literal 'node' in the literals array 'literals'
-// of the function. Leave the array boilerplate in 'boilerplate'.
-class DeferredArrayLiteral: public DeferredCode {
- public:
- DeferredArrayLiteral(Register boilerplate,
- Register literals,
- ArrayLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredArrayLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ArrayLiteral* node_;
-};
-
-
-void DeferredArrayLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // Constant properties (2).
- __ Push(node_->literals());
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -2544,32 +2477,18 @@
// Load the literals array of the function.
__ movq(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredArrayLiteral* deferred =
- new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the resulting array literal boilerplate on the stack.
- frame_->Push(&boilerplate);
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ // Literal array.
+ frame_->Push(&literals);
+ // Literal index.
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ // Constant elements.
+ frame_->Push(node->constant_elements());
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
- Result clone = frame_->CallRuntime(clone_function_id, 1);
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
// Generate code to set the elements in the array that are not
@@ -2770,28 +2689,24 @@
frame_->Push(Factory::undefined_value());
}
+ // Push the receiver.
+ frame_->PushParameterAt(-1);
+
// Resolve the call.
Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ movq(scratch.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ movq(result.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
- frame_->SetElementAt(arg_count, &result);
+ // The runtime call returns a pair of values in rax (function) and
+ // rdx (receiver). Touch up the stack with the right values.
+ Result receiver = allocator_->Allocate(rdx);
+ frame_->SetElementAt(arg_count + 1, &result);
+ frame_->SetElementAt(arg_count, &receiver);
+ receiver.Unuse();
// Call the function.
CodeForSourcePosition(node->position());
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
result = frame_->CallStub(&call_function, arg_count + 1);
// Restore the context and overwrite the function on the stack with
@@ -2852,7 +2767,7 @@
frame_->EmitPush(rdx);
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
} else if (property != NULL) {
// Check if the key is a literal string.
@@ -2917,7 +2832,7 @@
}
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
}
} else {
@@ -2932,7 +2847,7 @@
LoadGlobalReceiver();
// Call the function.
- CallWithArguments(args, node->position());
+ CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
}
}
@@ -3109,7 +3024,7 @@
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnarySubStub stub(overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
@@ -3979,69 +3894,16 @@
}
-void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- JumpTarget done;
- JumpTarget call_runtime;
- ASSERT(args->length() == 1);
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 4);
- // Load number and duplicate it.
+ // Load the arguments on the stack and call the runtime system.
Load(args->at(0));
- frame_->Dup();
-
- // Get the number into an unaliased register and load it onto the
- // floating point stack still leaving one copy on the frame.
- Result number = frame_->Pop();
- number.ToRegister();
- frame_->Spill(number.reg());
- FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
- number.Unuse();
-
- // Perform the operation on the number.
- switch (op) {
- case SIN:
- __ fsin();
- break;
- case COS:
- __ fcos();
- break;
- }
-
- // Go slow case if argument to operation is out of range.
- Result eax_reg = allocator()->Allocate(rax);
- ASSERT(eax_reg.is_valid());
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x0400)); // Bit 10 is condition flag C2.
- eax_reg.Unuse();
- call_runtime.Branch(not_zero);
-
- // Allocate heap number for result if possible.
- Result scratch = allocator()->Allocate();
- Result heap_number = allocator()->Allocate();
- __ AllocateHeapNumber(heap_number.reg(),
- scratch.reg(),
- call_runtime.entry_label());
- scratch.Unuse();
-
- // Store the result in the allocated heap number.
- __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
- // Replace the extra copy of the argument with the result.
- frame_->SetElementAt(0, &heap_number);
- done.Jump();
-
- call_runtime.Bind();
- // Free ST(0) which was not popped before calling into the runtime.
- __ ffree(0);
- Result answer;
- switch (op) {
- case SIN:
- answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
- break;
- case COS:
- answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
- break;
- }
- frame_->Push(&answer);
- done.Bind();
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+ Result result = frame_->CallRuntime(Runtime::kRegExpExec, 4);
+ frame_->Push(&result);
}
@@ -4051,7 +3913,32 @@
Load(args->at(0));
Load(args->at(1));
- Result answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Result answer = frame_->CallRuntime(Runtime::kSubString, 3);
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ StringCompareStub stub;
+ Result answer = frame_->CallStub(&stub, 2);
frame_->Push(&answer);
}
@@ -4379,15 +4266,7 @@
// The expression is either a property or a variable proxy that rewrites
// to a property.
Load(property->obj());
- // We use a named reference if the key is a literal symbol, unless it is
- // a string that can be legally parsed as an integer. This is because
- // otherwise we will not get into the slow case code that handles [] on
- // String objects.
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
Load(property->key());
@@ -4863,36 +4742,34 @@
frame_->Push(&result);
}
- { Reference shadow_ref(this, scope_->arguments_shadow());
- Reference arguments_ref(this, scope_->arguments());
- ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
- // Here we rely on the convenient property that references to slot
- // take up zero space in the frame (ie, it doesn't matter that the
- // stored value is actually below the reference on the frame).
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- Result arguments = frame_->Pop();
- if (arguments.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !arguments.handle()->IsTheHole();
- } else {
- __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
- arguments.Unuse();
- done.Branch(not_equal);
- }
+
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result probe = frame_->Pop();
+ if (probe.is_constant()) {
+ // We have to skip updating the arguments object if it has been
+ // assigned a proper value.
+ skip_arguments = !probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ done.Branch(not_equal);
}
- if (!skip_arguments) {
- arguments_ref.SetValue(NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
- }
- shadow_ref.SetValue(NOT_CONST_INIT);
}
+ if (!skip_arguments) {
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
return frame_->Pop();
}
@@ -5126,7 +5003,7 @@
void CodeGenerator::GenericBinaryOperation(Token::Value op,
- SmiAnalysis* type,
+ StaticType* type,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@@ -5315,7 +5192,7 @@
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
- SmiAnalysis* type,
+ StaticType* type,
bool reversed,
OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
@@ -6098,7 +5975,7 @@
// a loop and the key is likely to be a smi.
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
- SmiAnalysis* key_smi_analysis = property->key()->type();
+ StaticType* key_smi_analysis = property->key()->type();
if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
Comment cmnt(masm, "[ Inlined store to keyed Property");
@@ -6198,6 +6075,91 @@
}
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Clone the boilerplate in new space. Set the context to the
+ // current context in rsi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the boilerplate function from the stack.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+
+ // Clone the rest of the boilerplate fields. We don't have to update
+ // the write barrier because the allocated object is in new space.
+ for (int offset = kPointerSize;
+ offset < JSFunction::kSize;
+ offset += kPointerSize) {
+ if (offset == JSFunction::kContextOffset) {
+ __ movq(FieldOperand(rax, offset), rsi);
+ } else {
+ __ movq(rbx, FieldOperand(rdx, offset));
+ __ movq(FieldOperand(rax, offset), rbx);
+ }
+ }
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(rcx); // Temporarily remove return address.
+ __ pop(rdx);
+ __ push(rsi);
+ __ push(rdx);
+ __ push(rcx); // Restore return address.
+ __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+}
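
In host terms the copy loop above amounts to the following, assuming flat
8-byte fields within JSFunction::kSize (the helper is invented):

    #include <cstdint>

    void CloneClosureFieldsSketch(uint64_t* clone, const uint64_t* boilerplate,
                                  uint64_t current_context, int num_fields,
                                  int context_index) {
      // Field 0 (the map) was installed separately; every other field is
      // copied verbatim except the context slot, which receives rsi's value.
      for (int i = 1; i < num_fields; i++) {
        clone[i] = (i == context_index) ? current_context : boilerplate[i];
      }
    }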
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
+
+ // Setup the fixed slots.
+ __ xor_(rbx, rbx); // Set to NULL.
+ __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
+
+ // Copy the global object from the surrounding context.
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ movq(rsi, rax);
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+}
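
Back-of-envelope for the AllocateInNewSpace size above; the fixed slot count
(closure, fcontext, previous, extension, global) and the 8-byte pointer size
are assumptions of this sketch:

    int ContextAllocationSizeSketch(int slots) {
      const int kPointerSize = 8;
      const int kMinContextSlots = 5;                      // assumed value
      const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
      int length = slots + kMinContextSlots;
      return length * kPointerSize + kFixedArrayHeaderSize;
    }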
+
+
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
__ movq(rax, Operand(rsp, 1 * kPointerSize));
@@ -6337,7 +6299,9 @@
// End of CodeGenerator implementation.
-void UnarySubStub::Generate(MacroAssembler* masm) {
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ ASSERT(op_ == Token::SUB);
+
Label slow;
Label done;
Label try_float;
@@ -6405,34 +6369,39 @@
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
- Label return_equal;
- Label heap_number;
- // If it's not a heap number, then return equal.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
- __ bind(&return_equal);
- __ xor_(rax, rax);
- __ ret(0);
+ if (never_nan_nan_) {
+ __ xor_(rax, rax);
+ __ ret(0);
+ } else {
+ Label return_equal;
+ Label heap_number;
+ // If it's not a heap number, then return equal.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ __ bind(&return_equal);
+ __ xor_(rax, rax);
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only allow QNaNs, which have bit 51 set (which also rules out
- // the value being Infinity).
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only allow QNaNs, which have bit 51 set (which also rules out
+ // the value being Infinity).
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
- __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
- __ xorl(rax, rax);
- __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
- __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
- __ setcc(above_equal, rax);
- __ ret(0);
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+ __ xorl(rax, rax);
+ __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
+ __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+ __ setcc(above_equal, rax);
+ __ ret(0);
+ }
__ bind(¬_identical);
}
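
The QNaN test relies on the IEEE-754 layout described in the comments. A
host-side model of the same predicate; the mask restates the commented bit
ranges and is an assumption of the sketch:

    #include <cstdint>
    #include <cstring>

    bool IsQuietNaNSketch(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      // Exponent bits 52..62 plus quiet bit 51, seen in the upper word.
      const uint32_t kMask = 0xFFFu << 19;
      return (hi & kMask) == kMask;  // all bits set => QNaN, excludes Infinity
    }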
@@ -6528,9 +6497,10 @@
// Fast negative check for symbol-to-symbol equality.
__ bind(&check_for_symbols);
+ Label check_for_strings;
if (cc_ == equal) {
- BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax (not rax) already holds a
@@ -6538,6 +6508,23 @@
__ ret(2 * kPointerSize);
}
+ __ bind(&check_for_strings);
+
+ __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ rdx,
+ rax,
+ rcx,
+ rbx,
+ rdi,
+ r8);
+
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
__ bind(&call_builtin);
// must swap argument order
__ pop(rcx);
@@ -6579,15 +6566,18 @@
__ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
- __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
- __ j(not_equal, label);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
}
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
int position) {
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
@@ -6600,7 +6590,7 @@
// Use the shared code stub to call the function.
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- CallFunctionStub call_function(arg_count, in_loop);
+ CallFunctionStub call_function(arg_count, in_loop, flags);
Result answer = frame_->CallStub(&call_function, arg_count + 1);
// Restore context and replace function on the stack with the
// result of the stub invocation.
@@ -6760,16 +6750,13 @@
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Nothing to do: The formal number of parameters has already been
- // passed in register rax by calling function. Just return it.
- __ ret(0);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame and return it.
- __ bind(&adaptor);
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Otherwise nothing to do: The number of formal parameters has already been
+ // passed in register rax by the calling function. Just return it.
+ __ cmovq(equal, rax,
+ Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ ret(0);
}
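
The rewrite above trades a branch for cmovq; in C terms the builtin now
computes (helper invented):

    #include <cstdint>

    uint64_t ArgumentsLengthSketch(bool in_adaptor_frame,
                                   uint64_t formal_count,
                                   uint64_t adaptor_length) {
      // cmovq(equal, rax, mem) keeps the formal count already in rax unless
      // the frame-marker comparison set the equal flag.
      return in_adaptor_frame ? adaptor_length : formal_count;
    }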
@@ -7001,6 +6988,32 @@
void CallFunctionStub::Generate(MacroAssembler* masm) {
Label slow;
+ // If the receiver might be a value (string, number or boolean), check for this
+ // and box it if it is.
+ if (ReceiverMightBeValue()) {
+ // Get the receiver from the stack.
+ // +1 ~ return address
+ Label receiver_is_value, receiver_is_js_object;
+ __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
+
+ // Check if receiver is a smi (which is a number value).
+ __ JumpIfSmi(rax, &receiver_is_value);
+
+ // Check if the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
+ __ j(above_equal, &receiver_is_js_object);
+
+ // Call the runtime to box the value.
+ __ bind(&receiver_is_value);
+ __ EnterInternalFrame();
+ __ push(rax);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+ __ LeaveInternalFrame();
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
+
+ __ bind(&receiver_is_js_object);
+ }
+
// Get the function to call from the stack.
// +2 ~ receiver, return address
__ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
@@ -7371,19 +7384,28 @@
const char* GenericBinaryOpStub::GetName() {
- switch (op_) {
- case Token::ADD: return "GenericBinaryOpStub_ADD";
- case Token::SUB: return "GenericBinaryOpStub_SUB";
- case Token::MUL: return "GenericBinaryOpStub_MUL";
- case Token::DIV: return "GenericBinaryOpStub_DIV";
- case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
- case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
- case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
- case Token::SAR: return "GenericBinaryOpStub_SAR";
- case Token::SHL: return "GenericBinaryOpStub_SHL";
- case Token::SHR: return "GenericBinaryOpStub_SHR";
- default: return "GenericBinaryOpStub";
+ if (name_ != NULL) return name_;
+ const int len = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+ if (name_ == NULL) return "OOM";
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
}
+
+ OS::SNPrintF(Vector<char>(name_, len),
+ "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+ op_name,
+ overwrite_name,
+ (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
+ args_in_registers_ ? "RegArgs" : "StackArgs",
+ args_reversed_ ? "_R" : "",
+ use_sse3_ ? "SSE3" : "SSE2");
+ return name_;
}
@@ -7796,8 +7818,8 @@
__ j(above_equal, &string1);
// First and second argument are strings.
- Runtime::Function* f = Runtime::FunctionForId(Runtime::kStringAdd);
- __ TailCallRuntime(ExternalReference(f), 2, f->result_size);
+ StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+ __ TailCallStub(&stub);
// Only first argument is a string.
__ bind(&string1);
@@ -7875,9 +7897,402 @@
int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
- return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+ // Encode the three parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ int nnn_value = (never_nan_nan_ ? 2 : 0);
+ if (cc_ != equal) nnn_value = 0; // Avoid duplicate stubs.
+ return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
+}
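
The same packing, spelled out as a host-side helper (name invented):

    #include <cassert>

    // Bit 0 = strict, bit 1 = never-NaN-NaN (kept only for equality so other
    // conditions don't mint duplicate stubs), bits 2 and up = condition code.
    int CompareMinorKeySketch(unsigned cc, bool is_equal,
                              bool never_nan_nan, bool strict) {
      assert(cc < (1u << 14));
      int nnn = (is_equal && never_nan_nan) ? 2 : 0;
      return (static_cast<int>(cc) << 2) | nnn | (strict ? 1 : 0);
    }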
+
+
+const char* CompareStub::GetName() {
+ switch (cc_) {
+ case less: return "CompareStub_LT";
+ case greater: return "CompareStub_GT";
+ case less_equal: return "CompareStub_LE";
+ case greater_equal: return "CompareStub_GE";
+ case not_equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_NE_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_NO_NAN";
+ } else {
+ return "CompareStub_NE";
+ }
+ }
+ }
+ case equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_EQ_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_NO_NAN";
+ } else {
+ return "CompareStub_EQ";
+ }
+ }
+ }
+ default: return "CompareStub";
+ }
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ Label string_add_runtime;
+
+ // Load the two arguments.
+ __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
+
+ // Make sure that both arguments are strings if not known in advance.
+ if (string_check_) {
+ Condition is_smi;
+ is_smi = masm->CheckSmi(rax);
+ __ j(is_smi, &string_add_runtime);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
+ __ j(above_equal, &string_add_runtime);
+
+ // First argument is a string, test the second.
+ is_smi = masm->CheckSmi(rdx);
+ __ j(is_smi, &string_add_runtime);
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
+ __ j(above_equal, &string_add_runtime);
+ }
+
+ // Both arguments are strings.
+ // rax: first string
+ // rdx: second string
+ // Check if either of the strings are empty. In that case return the other.
+ Label second_not_zero_length, both_not_zero_length;
+ __ movl(rcx, FieldOperand(rdx, String::kLengthOffset));
+ __ testl(rcx, rcx);
+ __ j(not_zero, &second_not_zero_length);
+ // Second string is empty, result is first string which is already in rax.
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&second_not_zero_length);
+ __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ testl(rbx, rbx);
+ __ j(not_zero, &both_not_zero_length);
+ // First string is empty, result is second string which is in rdx.
+ __ movq(rax, rdx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Both strings are non-empty.
+ // rax: first string
+ // rbx: length of first string
+ // rcx: length of second string
+ // rdx: second string
+ // r8: instance type of first string if string check was performed above
+ // r9: instance type of second string if string check was performed above
+ Label string_add_flat_result;
+ __ bind(&both_not_zero_length);
+ // Look at the length of the result of adding the two strings.
+ __ addl(rbx, rcx);
+ // Use the runtime system when adding two one character strings, as it
+ // contains optimizations for this specific case using the symbol table.
+ __ cmpl(rbx, Immediate(2));
+ __ j(equal, &string_add_runtime);
+ // If arguments were known to be strings, maps are not loaded to r8 and r9
+ // by the code above.
+ if (!string_check_) {
+ __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+ }
+ // Get the instance types of the two strings as they will be needed soon.
+ __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
+ __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
+ // Check if resulting string will be flat.
+ __ cmpl(rbx, Immediate(String::kMinNonFlatLength));
+ __ j(below, &string_add_flat_result);
+ // Handle exceptionally long strings in the runtime system.
+ ASSERT((String::kMaxLength & 0x80000000) == 0);
+ __ cmpl(rbx, Immediate(String::kMaxLength));
+ __ j(above, &string_add_runtime);
+
+ // If result is not supposed to be flat, allocate a cons string object. If
+ // both strings are ascii the result is an ascii cons string.
+ // rax: first string
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ Label non_ascii, allocated;
+ __ movl(rcx, r8);
+ __ and_(rcx, r9);
+ ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ testl(rcx, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii);
+ // Allocate an ascii cons string.
+ __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ bind(&allocated);
+ // Fill the fields of the cons string.
+ __ movl(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
+ __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+ __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
+ __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
+ __ movq(rax, rcx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&non_ascii);
+ // Allocate a two byte cons string.
+ __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
+ __ jmp(&allocated);
+
+ // Handle creating a flat result. First check that both strings are not
+ // external strings.
+ // rax: first string
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ __ bind(&string_add_flat_result);
+ __ movl(rcx, r8);
+ __ and_(rcx, Immediate(kStringRepresentationMask));
+ __ cmpl(rcx, Immediate(kExternalStringTag));
+ __ j(equal, &string_add_runtime);
+ __ movl(rcx, r9);
+ __ and_(rcx, Immediate(kStringRepresentationMask));
+ __ cmpl(rcx, Immediate(kExternalStringTag));
+ __ j(equal, &string_add_runtime);
+ // Now check if both strings are ascii strings.
+ // rax: first string
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ Label non_ascii_string_add_flat_result;
+ ASSERT(kStringEncodingMask == kAsciiStringTag);
+ __ testl(r8, Immediate(kAsciiStringTag));
+ __ j(zero, &non_ascii_string_add_flat_result);
+ __ testl(r9, Immediate(kAsciiStringTag));
+ __ j(zero, &string_add_runtime);
+ // Both strings are ascii strings. As they are short they are both flat.
+ __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ // rcx: result string
+ __ movq(rbx, rcx);
+ // Locate first character of result.
+ __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument
+ __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // rax: first char of first argument
+ // rbx: result string
+ // rcx: first character of result
+ // rdx: second string
+ // rdi: length of first argument
+ GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+ // Locate first character of second argument.
+ __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // rbx: result string
+ // rcx: next character of result
+ // rdx: first char of second argument
+ // rdi: length of second argument
+ GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+ __ movq(rax, rbx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // rax: first string - known to be two byte
+ // rbx: length of resulting flat string
+ // rdx: second string
+ // r8: instance type of first string
+ // r9: instance type of second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ and_(r9, Immediate(kAsciiStringTag));
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+ // rcx: result string
+ __ movq(rbx, rcx);
+ // Locate first character of result.
+ __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Locate first character of first argument.
+ __ movl(rdi, FieldOperand(rax, String::kLengthOffset));
+ __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // rax: first char of first argument
+ // rbx: result string
+ // rcx: first character of result
+ // rdx: second argument
+ // rdi: length of first argument
+ GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+ // Locate first character of second argument.
+ __ movl(rdi, FieldOperand(rdx, String::kLengthOffset));
+ __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // rbx: result string
+ // rcx: next character of result
+ // rdx: first char of second argument
+ // rdi: length of second argument
+ GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+ __ movq(rax, rbx);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
+}
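
A sketch of the dispatch the stub implements; the thresholds are passed in
because the values of String::kMinNonFlatLength and String::kMaxLength are
not shown in this hunk:

    enum AddStrategy { RUNTIME, FLAT_COPY, CONS_STRING };

    // 1+1 characters go to the runtime's symbol-table fast path, short
    // results are copied flat, over-long results bail out, and everything
    // else becomes a cons string.
    AddStrategy ChooseAddStrategySketch(int combined_length,
                                        int min_non_flat_length,
                                        int max_length) {
      if (combined_length == 2) return RUNTIME;
      if (combined_length < min_non_flat_length) return FLAT_COPY;
      if (combined_length > max_length) return RUNTIME;
      return CONS_STRING;
    }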
+
+
+void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ movb(kScratchRegister, Operand(src, 0));
+ __ movb(Operand(dest, 0), kScratchRegister);
+ __ addq(src, Immediate(1));
+ __ addq(dest, Immediate(1));
+ } else {
+ __ movzxwl(kScratchRegister, Operand(src, 0));
+ __ movw(Operand(dest, 0), kScratchRegister);
+ __ addq(src, Immediate(2));
+ __ addq(dest, Immediate(2));
+ }
+ __ subl(count, Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ // Ensure that you can always subtract a string length from a non-negative
+ // number (e.g. another length).
+ ASSERT(String::kMaxLength < 0x7fffffff);
+
+ // Find minimum length and length difference.
+ __ movl(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ movl(scratch4, scratch1);
+ __ subl(scratch4, FieldOperand(right, String::kLengthOffset));
+ // Register scratch4 now holds left.length - right.length.
+ const Register length_difference = scratch4;
+ Label left_shorter;
+ __ j(less, &left_shorter);
+ // The right string isn't longer than the left one.
+ // Get the right string's length by subtracting the (non-negative) difference
+ // from the left string's length.
+ __ subl(scratch1, length_difference);
+ __ bind(&left_shorter);
+ // Register scratch1 now holds Min(left.length, right.length).
+ const Register min_length = scratch1;
+
+ Label compare_lengths;
+ // If min-length is zero, go directly to comparing lengths.
+ __ testl(min_length, min_length);
+ __ j(zero, &compare_lengths);
+
+ // Registers scratch2 and scratch3 are free.
+ Label result_not_equal;
+ Label loop;
+ {
+ // Check characters 0 .. min_length - 1 in a loop.
+ // Use scratch3 as loop index, min_length as limit and scratch2
+ // for computation.
+ const Register index = scratch3;
+ __ movl(index, Immediate(0)); // Index into strings.
+ __ bind(&loop);
+ // Compare characters.
+ // TODO(lrn): Could we load more than one character at a time?
+ __ movb(scratch2, FieldOperand(left,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ // Increment index and use -1 modifier on next load to give
+ // the previous load extra time to complete.
+ __ addl(index, Immediate(1));
+ __ cmpb(scratch2, FieldOperand(right,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize - 1));
+ __ j(not_equal, &result_not_equal);
+ __ cmpl(index, min_length);
+ __ j(not_equal, &loop);
+ }
+ // Completed loop without finding different characters.
+ // Compare lengths (precomputed).
+ __ bind(&compare_lengths);
+ __ testl(length_difference, length_difference);
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ Label result_greater;
+ __ bind(&result_not_equal);
+ // Unequal comparison of left to right, either character or length.
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Move(rax, Smi::FromInt(LESS));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Move(rax, Smi::FromInt(GREATER));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+}
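The control flow above is the classic prefix-then-length string comparison. As plain C++ it would read roughly as follows (a sketch under the same assumptions the stub makes: both strings flat, sequential ASCII; the -1/0/1 results stand in for the LESS/EQUAL/GREATER smis loaded into rax):

    #include <algorithm>
    #include <cstdint>

    // Compare the common prefix one byte at a time, then fall back to
    // the precomputed length difference, exactly as the generated loop.
    int CompareFlatAsciiSketch(const uint8_t* left, int left_len,
                               const uint8_t* right, int right_len) {
      const int min_length = std::min(left_len, right_len);
      for (int i = 0; i < min_length; i++) {
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      const int diff = left_len - right_len;
      return diff == 0 ? 0 : (diff < 0 ? -1 : 1);
    }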
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // rsp[0]: return address
+ // rsp[8]: right string
+ // rsp[16]: left string
+
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left
+ __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right
+
+ // Check for identity.
+ Label not_same;
+ __ cmpq(rdx, rax);
+ __ j(not_equal, &not_same);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both are sequential ASCII strings.
+ __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
+
+ // Inline comparison of ascii strings.
+ GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
}
#undef __
@@ -7973,6 +8388,7 @@
#endif
+
#undef __
} } // namespace v8::internal
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 8539884..50bb023 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -436,7 +436,7 @@
void GenericBinaryOperation(
Token::Value op,
- SmiAnalysis* type,
+ StaticType* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
@@ -449,7 +449,7 @@
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
- SmiAnalysis* type,
+ StaticType* type,
bool reversed,
OverwriteMode overwrite_mode);
@@ -474,7 +474,9 @@
// at most 16 bits of user-controlled data per assembly operation.
void LoadUnsafeSmi(Register target, Handle<Object> value);
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
// Use an optimized version of Function.prototype.apply that avoids
// allocating the arguments object and just copies the arguments
@@ -538,15 +540,18 @@
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
- // Fast support for Math.sin and Math.cos.
- enum MathOp { SIN, COS };
- void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
- inline void GenerateMathSin(ZoneList<Expression*>* args);
- inline void GenerateMathCos(ZoneList<Expression*>* args);
-
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
+ // Fast support for SubString.
+ void GenerateSubString(ZoneList<Expression*>* args);
+
+ // Fast support for StringCompare.
+ void GenerateStringCompare(ZoneList<Expression*>* args);
+
+ // Support for direct calls from JavaScript to native RegExp code.
+ void GenerateRegExpExec(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -614,46 +619,6 @@
};
-// -------------------------------------------------------------------------
-// Code stubs
-//
-// These independent code objects are created once, and used multiple
-// times by generated code to perform common tasks, often the slow
-// case of a JavaScript operation. They are all subclasses of CodeStub,
-// which is declared in code-stubs.h.
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
- ToBooleanStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return 0; }
-};
-
-
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
@@ -670,7 +635,8 @@
mode_(mode),
flags_(flags),
args_in_registers_(false),
- args_reversed_(false) {
+ args_reversed_(false),
+ name_(NULL) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -689,6 +655,7 @@
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
+ char* name_;
const char* GetName();
@@ -745,6 +712,58 @@
};
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ bool ascii);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
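MajorKey and MinorKey together form the cache key under which a generated stub is stored, so the checked (MinorKey 0) and unchecked (MinorKey 1) StringAdd variants coexist as distinct code objects. A toy model of that lookup (hypothetical names; the real cache lives in the shared CodeStub machinery):

    #include <map>

    // Sketch: one code object per (major, minor) pair; requesting a stub
    // with the same keys reuses the previously generated code.
    struct StubKeySketch {
      int major;  // stub family, e.g. StringAdd or StringCompare
      int minor;  // per-family variant, e.g. 0 = check args, 1 = skip check
      bool operator<(const StubKeySketch& o) const {
        return major != o.major ? major < o.major : minor < o.minor;
      }
    };
    static std::map<StubKeySketch, void*> stub_cache_sketch;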
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() {}
+
+ // Compares two flat ASCII strings and returns the result in rax after
+ // popping the two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 333a47d..0f28433 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -62,11 +62,9 @@
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = fun->scope()->num_stack_slots();
- if (locals_count <= 1) {
- if (locals_count > 0) {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
- } else {
+ if (locals_count == 1) {
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ } else if (locals_count > 1) {
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < locals_count; i++) {
__ push(rdx);
@@ -132,6 +130,10 @@
Move(dot_arguments_slot, rcx, rbx, rdx);
}
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(fun->scope()->declarations());
+ }
+
{ Comment cmnt(masm_, "[ Stack check");
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -141,10 +143,6 @@
__ bind(&ok);
}
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(fun->scope()->declarations());
- }
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
@@ -202,219 +200,461 @@
}
-void FastCodeGenerator::Move(Expression::Context context, Register source) {
+void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
+
case Expression::kEffect:
+ // Nothing to do.
break;
+
case Expression::kValue:
- __ push(source);
+ // Move value into place.
+ switch (location_) {
+ case kAccumulator:
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ break;
+ case kStack:
+ __ push(reg);
+ break;
+ }
break;
+
case Expression::kTest:
- TestAndBranch(source, true_label_, false_label_);
+ // For simplicity we always test the accumulator register.
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ DoTest(context);
break;
- case Expression::kValueTest: {
- Label discard;
- __ push(source);
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ push(source);
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
}
}
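The switch above is easier to read with the new code generator's two-axis model in mind: every expression is compiled for a context (effect, value, test, or one of the two hybrids) and, where a value is produced, for a desired location. A compact sketch of the two axes (hypothetical names mirroring the enums used in this file):

    // Where a value-producing expression should leave its result.
    enum LocationSketch { kAccumulatorSketch, kStackSketch };

    // What the enclosing expression needs from a subexpression.
    enum ContextSketch {
      kEffectSketch,     // result discarded
      kValueSketch,      // result materialized (accumulator or stack)
      kTestSketch,       // result consumed as a branch only
      kValueTestSketch,  // value kept only along the true branch
      kTestValueSketch   // value kept only along the false branch
    };

Apply(context, reg) is then the single funnel that reconciles where a value currently is (a register) with where the context wants it.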
-template <>
-Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
- Register scratch) {
- switch (source->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(rbp, SlotOffset(source));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(source->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, source->index());
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- // Fall-through.
- default:
- UNREACHABLE();
- return Operand(rax, 0); // Dead code to make the compiler happy.
- }
-}
-
-
-void FastCodeGenerator::Move(Register dst, Slot* source) {
- Operand location = CreateSlotOperand<Operand>(source, dst);
- __ movq(dst, location);
-}
-
-
-void FastCodeGenerator::Move(Expression::Context context,
- Slot* source,
- Register scratch) {
+void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+ // Nothing to do.
break;
case Expression::kValue: {
- Operand location = CreateSlotOperand<Operand>(source, scratch);
- __ push(location);
+ MemOperand slot_operand = EmitSlotSearch(slot, result_register());
+ switch (location_) {
+ case kAccumulator:
+ __ movq(result_register(), slot_operand);
+ break;
+ case kStack:
+ // Memory operands can be pushed directly.
+ __ push(slot_operand);
+ break;
+ }
break;
}
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
+
+ case Expression::kTest:
+ Move(result_register(), slot);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
case Expression::kTestValue:
- Move(scratch, source);
- Move(context, scratch);
+ Move(result_register(), slot);
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
break;
}
}
-void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+ // Nothing to do.
break;
case Expression::kValue:
- __ Push(expr->handle());
+ switch (location_) {
+ case kAccumulator:
+ __ Move(result_register(), lit->handle());
+ break;
+ case kStack:
+ __ Push(lit->handle());
+ break;
+ }
break;
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
+
+ case Expression::kTest:
+ __ Move(result_register(), lit->handle());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
case Expression::kTestValue:
- __ Move(rax, expr->handle());
- Move(context, rax);
+ __ Move(result_register(), lit->handle());
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ __ push(result_register());
+ break;
+ }
+ DoTest(context);
break;
}
}
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(1);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ __ pop(result_register());
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ pop(result_register());
+ break;
+ case kStack:
+ __ movq(result_register(), Operand(rsp, 0));
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(rsp));
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ __ Drop(count);
+ break;
+
+ case Expression::kValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ movq(Operand(rsp, 0), reg);
+ break;
+ }
+ break;
+
+ case Expression::kTest:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ DoTest(context);
+ break;
+
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ Drop(count);
+ if (!reg.is(result_register())) __ movq(result_register(), reg);
+ break;
+ case kStack:
+ if (count > 1) __ Drop(count - 1);
+ __ movq(result_register(), reg);
+ __ movq(Operand(rsp, 0), result_register());
+ break;
+ }
+ DoTest(context);
+ break;
+ }
+}
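The value/stack case above is the interesting one: rather than dropping all `count` slots and pushing `reg`, it drops `count - 1` and overwrites the remaining top slot, saving one stack operation. A sketch of that discipline (illustrative only):

    #include <cstdint>
    #include <vector>

    // Drop `count` stack slots, then leave `value` on top: implemented
    // as "drop count - 1, overwrite the top slot", matching the emitted
    // "if (count > 1) Drop(count - 1); movq(Operand(rsp, 0), reg)".
    void DropAndLeaveOnTopSketch(std::vector<intptr_t>* stack,
                                 int count, intptr_t value) {
      stack->resize(stack->size() - (count - 1));
      stack->back() = value;
    }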
+
+
+void FastCodeGenerator::Apply(Expression::Context context,
+ Label* materialize_true,
+ Label* materialize_false) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+
+ case Expression::kEffect:
+ ASSERT_EQ(materialize_true, materialize_false);
+ __ bind(materialize_true);
+ break;
+
+ case Expression::kValue: {
+ Label done;
+ switch (location_) {
+ case kAccumulator:
+ __ bind(materialize_true);
+ __ Move(result_register(), Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ Move(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ bind(materialize_true);
+ __ Push(Factory::true_value());
+ __ jmp(&done);
+ __ bind(materialize_false);
+ __ Push(Factory::false_value());
+ break;
+ }
+ __ bind(&done);
+ break;
+ }
+
+ case Expression::kTest:
+ break;
+
+ case Expression::kValueTest:
+ __ bind(materialize_true);
+ switch (location_) {
+ case kAccumulator:
+ __ Move(result_register(), Factory::true_value());
+ break;
+ case kStack:
+ __ Push(Factory::true_value());
+ break;
+ }
+ __ jmp(true_label_);
+ break;
+
+ case Expression::kTestValue:
+ __ bind(materialize_false);
+ switch (location_) {
+ case kAccumulator:
+ __ Move(result_register(), Factory::false_value());
+ break;
+ case kStack:
+ __ Push(Factory::false_value());
+ break;
+ }
+ __ jmp(false_label_);
+ break;
+ }
+}
+
+
+void FastCodeGenerator::DoTest(Expression::Context context) {
+ // The value to test is in the accumulator. If the value might be needed
+ // on the stack (value/test and test/value contexts with a stack location
+ // desired), then the value is already duplicated on the stack.
+ ASSERT_NE(NULL, true_label_);
+ ASSERT_NE(NULL, false_label_);
+
+ // In value/test and test/value expression contexts with stack as the
+ // desired location, there is already an extra value on the stack. Use a
+ // label to discard it if unneeded.
+ Label discard;
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_false = &discard;
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ break;
+ case kStack:
+ if_true = &discard;
+ break;
+ }
+ break;
+ }
+
+ // Emit the inlined tests assumed by the stub.
+ __ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ __ j(equal, if_false);
+ __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
+ __ j(equal, if_true);
+ __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
+ __ j(equal, if_false);
+ ASSERT_EQ(0, kSmiTag);
+ __ SmiCompare(result_register(), Smi::FromInt(0));
+ __ j(equal, if_false);
+ Condition is_smi = masm_->CheckSmi(result_register());
+ __ j(is_smi, if_true);
+
+ // Save a copy of the value if it may be needed and isn't already saved.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+ case Expression::kTest:
+ break;
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ push(result_register());
+ break;
+ case kStack:
+ break;
+ }
+ break;
+ }
+
+ // Call the ToBoolean stub for all other cases.
+ ToBooleanStub stub;
+ __ push(result_register());
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+
+ // The stub returns nonzero for true. Complete based on the context.
+ switch (context) {
+ case Expression::kUninitialized:
+ case Expression::kEffect:
+ case Expression::kValue:
+ UNREACHABLE();
+
+ case Expression::kTest:
+ __ j(not_zero, true_label_);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kValueTest:
+ switch (location_) {
+ case kAccumulator:
+ __ j(zero, &discard);
+ __ pop(result_register());
+ __ jmp(true_label_);
+ break;
+ case kStack:
+ __ j(not_zero, true_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
+ break;
+
+ case Expression::kTestValue:
+ switch (location_) {
+ case kAccumulator:
+ __ j(not_zero, &discard);
+ __ pop(result_register());
+ __ jmp(false_label_);
+ break;
+ case kStack:
+ __ j(zero, false_label_);
+ break;
+ }
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ break;
+ }
+}
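The inline checks emitted at the top of DoTest resolve the most common operands without calling ToBooleanStub. Their decision table, as a C++ sketch (hypothetical helper; the predicates stand in for the root comparisons and smi check above):

    // Values settled inline: undefined, true, false, and all smis
    // (only the smi 0 is false). Everything else goes to the stub.
    enum TruthSketch { kTrueSketch, kFalseSketch, kNeedStubSketch };

    TruthSketch FastToBooleanSketch(bool is_undefined, bool is_true_object,
                                    bool is_false_object, bool is_smi,
                                    long smi_value) {
      if (is_undefined) return kFalseSketch;
      if (is_true_object) return kTrueSketch;
      if (is_false_object) return kFalseSketch;
      if (is_smi) return smi_value == 0 ? kFalseSketch : kTrueSketch;
      return kNeedStubSketch;  // strings, heap numbers, other objects
    }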
+
+
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(rbp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return Operand(rax, 0); // Dead code to make the compiler happy.
+}
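EmitSlotSearch reduces every readable slot to a memory operand of one of two shapes, sketched below (hypothetical struct; LOOKUP slots never reach this code, hence the UNREACHABLE above):

    // Parameters and locals are frame-relative; context slots first walk
    // context_chain_length links, then index into the resolved context.
    struct SlotAddressSketch {
      enum Base { kFrameRelative, kContextRelative } base;
      int hops;    // context-chain links to follow (0 for frame slots)
      int offset;  // byte offset from the resolved base
    };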
+
+
+void FastCodeGenerator::Move(Register destination, Slot* source) {
+ MemOperand location = EmitSlotSearch(source, destination);
+ __ movq(destination, location);
+}
+
+
void FastCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
Register scratch2) {
- switch (dst->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- __ movq(Operand(rbp, SlotOffset(dst)), src);
- break;
- case Slot::CONTEXT: {
- ASSERT(!src.is(scratch1));
- ASSERT(!src.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- int context_chain_length =
- function_->scope()->ContextChainLength(dst->var()->scope());
- __ LoadContext(scratch1, context_chain_length);
- __ movq(Operand(scratch1, Context::SlotOffset(dst->index())), src);
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ movq(location, src);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
}
}
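The conditional RecordWrite is the generational write barrier: storing into a stack slot needs no bookkeeping, but storing into a heap-allocated context might create an old-to-new pointer that the next scavenge must see. A sketch of the invariant being maintained, with hypothetical names:

    #include <cstdint>

    // After a store at `offset` inside `host`, remember the slot if it
    // may now point from old space into new space.
    void WriteBarrierSketch(uintptr_t host, int offset,
                            bool host_in_old_space, bool value_in_new_space,
                            void (*remember_slot)(uintptr_t)) {
      if (host_in_old_space && value_in_new_space) {
        remember_slot(host + offset);  // rescanned at the next scavenge
      }
    }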
-void FastCodeGenerator::DropAndMove(Expression::Context context,
- Register source,
- int drop_count) {
- ASSERT(drop_count > 0);
- switch (context) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- __ addq(rsp, Immediate(drop_count * kPointerSize));
- break;
- case Expression::kValue:
- if (drop_count > 1) {
- __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
- }
- __ movq(Operand(rsp, 0), source);
- break;
- case Expression::kTest:
- ASSERT(!source.is(rsp));
- __ addq(rsp, Immediate(drop_count * kPointerSize));
- TestAndBranch(source, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (drop_count > 1) {
- __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
- }
- __ movq(Operand(rsp, 0), source);
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ movq(Operand(rsp, 0), source);
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
- }
-}
-
-
-void FastCodeGenerator::TestAndBranch(Register source,
- Label* true_label,
- Label* false_label) {
- ASSERT_NE(NULL, true_label);
- ASSERT_NE(NULL, false_label);
- // Use the shared ToBoolean stub to compile the value in the register into
- // control flow to the code generator's true and false labels. Perform
- // the fast checks assumed by the stub.
-
- // The undefined value is false.
- __ CompareRoot(source, Heap::kUndefinedValueRootIndex);
- __ j(equal, false_label);
- __ CompareRoot(source, Heap::kTrueValueRootIndex); // True is true.
- __ j(equal, true_label);
- __ CompareRoot(source, Heap::kFalseValueRootIndex); // False is false.
- __ j(equal, false_label);
- ASSERT_EQ(0, kSmiTag);
- __ SmiCompare(source, Smi::FromInt(0)); // The smi zero is false.
- __ j(equal, false_label);
- Condition is_smi = masm_->CheckSmi(source); // All other smis are true.
- __ j(is_smi, true_label);
-
- // Call the stub for all other cases.
- __ push(source);
- ToBooleanStub stub;
- __ CallStub(&stub);
- __ testq(rax, rax); // The stub returns nonzero for true.
- __ j(not_zero, true_label);
- __ jmp(false_label);
-}
-
-
void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = decl->proxy()->var();
@@ -424,18 +664,21 @@
if (slot != NULL) {
switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
+ case Slot::PARAMETER:
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+ __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(Operand(rbp, SlotOffset(var->slot())));
+ VisitForValue(decl->fun(), kAccumulator);
+ __ movq(Operand(rbp, SlotOffset(slot)), result_register());
}
break;
case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
@@ -451,11 +694,11 @@
kScratchRegister);
// No write barrier since the hole value is in old space.
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
- __ pop(rax);
- __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax);
+ VisitForValue(decl->fun(), kAccumulator);
+ __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
+ result_register());
int offset = Context::SlotOffset(slot->index());
- __ RecordWrite(rsi, offset, rax, rcx);
+ __ RecordWrite(rsi, offset, result_register(), rcx);
}
break;
@@ -475,7 +718,7 @@
if (decl->mode() == Variable::CONST) {
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else if (decl->fun() != NULL) {
- Visit(decl->fun());
+ VisitForValue(decl->fun(), kStack);
} else {
__ Push(Smi::FromInt(0)); // no initial value!
}
@@ -488,28 +731,24 @@
if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
- ASSERT_EQ(Expression::kValue, prop->obj()->context());
- Visit(prop->obj());
- ASSERT_EQ(Expression::kValue, prop->key()->context());
- Visit(prop->key());
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
if (decl->fun() != NULL) {
- ASSERT_EQ(Expression::kValue, decl->fun()->context());
- Visit(decl->fun());
- __ pop(rax);
+ VisitForValue(decl->fun(), kAccumulator);
} else {
- __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
-
// Absence of a test rax instruction following the call
// indicates that no part of the load was inlined.
+ __ nop();
// Value in rax is ignored (declarations are statements). Receiver
// and key on stack are discarded.
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ Drop(2);
}
}
}
@@ -525,20 +764,6 @@
}
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
- Comment cmnt(masm_, "[ ReturnStatement");
- Expression* expr = stmt->expression();
- if (expr->AsLiteral() != NULL) {
- __ Move(rax, expr->AsLiteral()->handle());
- } else {
- Visit(expr);
- ASSERT_EQ(Expression::kValue, expr->context());
- __ pop(rax);
- }
- EmitReturnSequence(stmt->statement_pos());
-}
-
-
void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
@@ -553,34 +778,39 @@
__ push(rsi);
__ Push(boilerplate);
__ CallRuntime(Runtime::kNewClosure, 2);
- Move(expr->context(), rax);
+ Apply(context_, rax);
}
void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- Expression* rewrite = expr->var()->rewrite();
+ EmitVariableLoad(expr->var(), context_);
+}
+
+
+void FastCodeGenerator::EmitVariableLoad(Variable* var,
+ Expression::Context context) {
+ Expression* rewrite = var->rewrite();
if (rewrite == NULL) {
- ASSERT(expr->var()->is_global());
+ ASSERT(var->is_global());
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ push(CodeGenerator::GlobalObject());
- __ Move(rcx, expr->name());
+ __ Move(rcx, var->name());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A test rax instruction following the call is used by the IC to
// indicate that the inobject property case was inlined. Ensure there
// is no test rax instruction here.
__ nop();
-
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context, rax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
Comment cmnt(masm_, "Stack slot");
break;
}
@@ -591,45 +821,45 @@
case Slot::LOOKUP:
UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
- Move(expr->context(), slot, rax);
+ Apply(context, slot);
} else {
- // A variable has been rewritten into an explicit access to
- // an object property.
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Currently the only parameter expressions that can occur are
- // on the form "slot[literal]".
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
- // Check that the object is in a slot.
- Variable* object = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object);
- Slot* object_slot = object->slot();
+ // Assert that the object is in a slot.
+ Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(object_var);
+ Slot* object_slot = object_var->slot();
ASSERT_NOT_NULL(object_slot);
// Load the object.
- Move(Expression::kValue, object_slot, rax);
+ MemOperand object_loc = EmitSlotSearch(object_slot, rax);
+ __ push(object_loc);
- // Check that the key is a smi.
+ // Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- Move(Expression::kValue, key_literal);
+ __ Push(key_literal->handle());
- // Do a KEYED property load.
+ // Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after
- // the call. It is treated specially by the LoadIC code.
-
+ // Notice: We must not have a "test rax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
+ __ nop();
// Drop key and object left on the stack by IC, and push the result.
- DropAndMove(expr->context(), rax, 2);
+ DropAndApply(2, context, rax);
}
}
@@ -655,44 +885,25 @@
__ Push(expr->pattern());
__ Push(expr->flags());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- // Label done:
__ bind(&done);
- Move(expr->context(), rax);
+ Apply(context_, rax);
}
void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Label boilerplate_exists;
-
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ movq(rax, FieldOperand(rbx, literal_offset));
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &boilerplate_exists);
- // Create boilerplate if it does not exist.
- // Literal array (0).
- __ push(rbx);
- // Literal index (1).
+ __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- // Constant properties (2).
__ Push(expr->constant_properties());
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ bind(&boilerplate_exists);
- // rax contains boilerplate.
- // Clone boilerplate.
- __ push(rax);
- if (expr->depth() == 1) {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ if (expr->depth() > 1) {
+ __ CallRuntime(Runtime::kCreateObjectLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- // If result_saved == true: The result is saved on top of the
- // stack and in rax.
- // If result_saved == false: The result not on the stack, just in rax.
+ // If result_saved is true the result is on top of the stack. If
+ // result_saved is false the result is in rax.
bool result_saved = false;
for (int i = 0; i < expr->properties()->length(); i++) {
@@ -706,108 +917,59 @@
result_saved = true;
}
switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+ // Fall through.
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
- __ pop(rax);
+ VisitForValue(value, kAccumulator);
__ Move(rcx, key->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
// StoreIC leaves the receiver on the stack.
- __ movq(rax, Operand(rsp, 0)); // Restore result back into rax.
break;
}
- // fall through
+ // Fall through.
case ObjectLiteral::Property::PROTOTYPE:
- __ push(rax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
+ VisitForValue(value, kStack);
__ CallRuntime(Runtime::kSetProperty, 3);
- __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
break;
- case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
- __ push(rax);
- Visit(key);
- ASSERT_EQ(Expression::kValue, key->context());
+ __ push(Operand(rsp, 0)); // Duplicate receiver.
+ VisitForValue(key, kStack);
__ Push(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0));
- Visit(value);
- ASSERT_EQ(Expression::kValue, value->context());
+ VisitForValue(value, kStack);
__ CallRuntime(Runtime::kDefineAccessor, 4);
- __ movq(rax, Operand(rsp, 0)); // Restore result into rax.
break;
- default: UNREACHABLE();
}
}
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ addq(rsp, Immediate(kPointerSize));
- break;
- case Expression::kValue:
- if (!result_saved) __ push(rax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
+
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, rax);
}
}
void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Label make_clone;
-
- // Fetch the function's literals array.
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rbx, FieldOperand(rbx, JSFunction::kLiteralsOffset));
- // Check if the literal's boilerplate has been instantiated.
- int offset =
- FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
- __ movq(rax, FieldOperand(rbx, offset));
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &make_clone);
-
- // Instantiate the boilerplate.
- __ push(rbx);
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->literals());
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
-
- __ bind(&make_clone);
- // Clone the boilerplate.
- __ push(rax);
+ __ Push(expr->constant_elements());
if (expr->depth() > 1) {
- __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
- __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+ __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
bool result_saved = false; // Is the result saved to the stack?
@@ -828,156 +990,86 @@
__ push(rax);
result_saved = true;
}
- Visit(subexpr);
- ASSERT_EQ(Expression::kValue, subexpr->context());
+ VisitForValue(subexpr, kAccumulator);
// Store the subexpression value in the array's elements.
- __ pop(rax); // Subexpression value.
__ movq(rbx, Operand(rsp, 0)); // Copy of array literal.
__ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ movq(FieldOperand(rbx, offset), rax);
+ __ movq(FieldOperand(rbx, offset), result_register());
// Update the write barrier for the array store.
- __ RecordWrite(rbx, offset, rax, rcx);
+ __ RecordWrite(rbx, offset, result_register(), rcx);
}
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- if (result_saved) __ addq(rsp, Immediate(kPointerSize));
- break;
- case Expression::kValue:
- if (!result_saved) __ push(rax);
- break;
- case Expression::kTest:
- if (result_saved) __ pop(rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- if (!result_saved) __ push(rax);
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
+ if (result_saved) {
+ ApplyTOS(context_);
+ } else {
+ Apply(context_, rax);
}
}
-void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Literal* key = prop->key()->AsLiteral();
+ __ Move(rcx, key->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetSourcePosition(prop->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+}
+
+
+void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+ Expression::Context context) {
+ __ push(result_register());
+ GenericBinaryOpStub stub(op,
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+ Apply(context, rax);
+}
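EmitBinaryOp assumes the left operand was already pushed and the right operand sits in the accumulator; pushing the accumulator gives the generic stub both operands on the stack, and the result comes back in rax. The calling convention as a sketch (illustrative only; the real stub also handles smis, doubles, and strings):

    #include <cstdint>
    #include <vector>

    // Stack on entry: ... left. Accumulator: right.
    // Push right, let the "stub" pop both, return the result (rax).
    intptr_t EmitBinaryOpSketch(std::vector<intptr_t>* stack,
                                intptr_t accumulator,
                                intptr_t (*stub)(intptr_t, intptr_t)) {
      stack->push_back(accumulator);
      const intptr_t right = stack->back(); stack->pop_back();
      const intptr_t left = stack->back(); stack->pop_back();
      return stub(left, right);
    }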
+
+
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
// Assignment to a global variable. Use inline caching for the
// assignment. Right-hand-side value is passed in rax, variable name in
// rcx, and the global object on the stack.
- __ pop(rax);
__ Move(rcx, var->name());
__ push(CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context, rax);
- } else if (var->slot()) {
+ } else if (var->slot() != NULL) {
Slot* slot = var->slot();
- ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
switch (slot->type()) {
case Slot::LOCAL:
- case Slot::PARAMETER: {
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect:
- // Perform assignment and discard value.
- __ pop(Operand(rbp, SlotOffset(var->slot())));
- break;
- case Expression::kValue:
- // Perform assignment and preserve value.
- __ movq(rax, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
- break;
- case Expression::kTest:
- // Perform assignment and test (and discard) value.
- __ pop(rax);
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
- }
+ case Slot::PARAMETER:
+ __ movq(Operand(rbp, SlotOffset(slot)), result_register());
break;
- }
case Slot::CONTEXT: {
- int chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- if (chain_length > 0) {
- // Move up the context chain to the context containing the slot.
- __ movq(rax,
- Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
- for (int i = 1; i < chain_length; i++) {
- __ movq(rax,
- Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
- }
- } else { // Slot is in the current context. Generate optimized code.
- __ movq(rax, rsi); // RecordWrite destroys the object register.
- }
- if (FLAG_debug_code) {
- __ cmpq(rax,
- Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ Check(equal, "Context Slot chain length wrong.");
- }
- __ pop(rcx);
- __ movq(Operand(rax, Context::SlotOffset(slot->index())), rcx);
+ MemOperand target = EmitSlotSearch(slot, rcx);
+ __ movq(target, result_register());
// RecordWrite may destroy all its register arguments.
- if (expr->context() == Expression::kValue) {
- __ push(rcx);
- } else if (expr->context() != Expression::kEffect) {
- __ movq(rdx, rcx);
- }
+ __ movq(rdx, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(rax, offset, rcx, rbx);
- if (expr->context() != Expression::kEffect &&
- expr->context() != Expression::kValue) {
- Move(expr->context(), rdx);
- }
+ __ RecordWrite(rcx, offset, rdx, rbx);
break;
}
@@ -985,6 +1077,11 @@
UNREACHABLE();
break;
}
+ Apply(context, result_register());
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
}
}
@@ -999,14 +1096,18 @@
// change to slow case to avoid the quadratic behavior of repeatedly
// adding fast properties.
if (expr->starts_initialization_block()) {
- __ push(Operand(rsp, kPointerSize)); // Receiver is under value.
+ __ push(result_register());
+ __ push(Operand(rsp, kPointerSize)); // Receiver is now under value.
__ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
}
- __ pop(rax);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -1016,7 +1117,7 @@
__ pop(rax);
}
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context_, rax);
}
@@ -1027,12 +1128,15 @@
// change to slow case to avoid the quadratic behavior of repeatedly
// adding fast properties.
if (expr->starts_initialization_block()) {
- // Reciever is under the key and value.
+ __ push(result_register());
+ // Receiver is now under the key and value.
__ push(Operand(rsp, 2 * kPointerSize));
__ CallRuntime(Runtime::kToSlowProperties, 1);
+ __ pop(result_register());
}
- __ pop(rax);
+ // Record source code position before IC call.
+ SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// This nop signals to the IC that there is no inlined code at the call
@@ -1042,73 +1146,57 @@
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ push(rax); // Result of assignment, saved even if not needed.
- // Reciever is under the key and value.
+ // Receiver is under the key and value.
__ push(Operand(rsp, 2 * kPointerSize));
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(rax);
}
// Receiver and key are still on stack.
- __ addq(rsp, Immediate(2 * kPointerSize));
- Move(expr->context(), rax);
+ DropAndApply(2, context_, rax);
}
void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- uint32_t dummy;
-
- // Record the source position for the property load.
- SetSourcePosition(expr->position());
// Evaluate receiver.
- Visit(expr->obj());
+ VisitForValue(expr->obj(), kStack);
-
- if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
- !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
- // Do a NAMED property load.
- // The IC expects the property name in rcx and the receiver on the stack.
- __ Move(rcx, key->AsLiteral()->handle());
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // By emitting a nop we make sure that we do not have a "test rax,..."
- // instruction after the call it is treated specially by the LoadIC code.
- __ nop();
+ if (key->IsPropertyName()) {
+ EmitNamedPropertyLoad(expr);
+ // Drop receiver left on the stack by IC.
+ DropAndApply(1, context_, rax);
} else {
- // Do a KEYED property load.
- Visit(expr->key());
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after
- // the call. It is treated specially by the LoadIC code.
-
- // Drop key left on the stack by IC.
- __ addq(rsp, Immediate(kPointerSize));
+ VisitForValue(expr->key(), kStack);
+ EmitKeyedPropertyLoad(expr);
+ // Drop key and receiver left on the stack by IC.
+ DropAndApply(2, context_, rax);
}
- DropAndMove(expr->context(), rax);
}
-void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) {
+void FastCodeGenerator::EmitCallWithIC(Call* expr,
+ Handle<Object> ignored,
+ RelocInfo::Mode mode) {
// Code common for calls using the IC.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ VisitForValue(args->at(i), kStack);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
// Call the IC initialization code.
+ InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
- NOT_IN_LOOP);
- __ call(ic, reloc_info);
+ in_loop);
+ __ Call(ic, mode);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context_, rax);
}
@@ -1117,16 +1205,16 @@
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
+ VisitForValue(args->at(i), kStack);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
- CallFunctionStub stub(arg_count, NOT_IN_LOOP);
+ CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
__ CallStub(&stub);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context_, rax);
}
@@ -1143,7 +1231,7 @@
__ Push(var->name());
// Push global object as receiver for the call IC lookup.
__ push(CodeGenerator::GlobalObject());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+ EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
// Call to a lookup slot.
@@ -1155,13 +1243,13 @@
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
__ Push(key->handle());
- Visit(prop->obj());
- EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+ VisitForValue(prop->obj(), kStack);
+ EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property, use keyed load IC followed by function
// call.
- Visit(prop->obj());
- Visit(prop->key());
+ VisitForValue(prop->obj(), kStack);
+ VisitForValue(prop->key(), kStack);
// Record source code position for IC call.
SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1170,14 +1258,15 @@
// instruction after the call; it is treated specially by the LoadIC code.
__ nop();
// Drop key left on the stack by IC.
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
// Pop receiver.
__ pop(rbx);
// Push result (function).
__ push(rax);
// Push receiver object on stack.
if (prop->is_synthetic()) {
- __ push(CodeGenerator::GlobalObject());
+ __ movq(rcx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
} else {
__ push(rbx);
}
@@ -1193,7 +1282,7 @@
loop_depth() == 0) {
lit->set_try_fast_codegen(true);
}
- Visit(fun);
+ VisitForValue(fun, kStack);
// Load global receiver object.
__ movq(rbx, CodeGenerator::GlobalObject());
__ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
@@ -1209,9 +1298,7 @@
// expression in new calls must be evaluated before the
// arguments.
// Push function on the stack.
- Visit(expr->expression());
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
- // If location is value, already on the stack,
+ VisitForValue(expr->expression(), kStack);
// Push global object (receiver).
__ push(CodeGenerator::GlobalObject());
@@ -1220,10 +1307,7 @@
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
- // If location is value, it is already on the stack,
- // so nothing to do here.
+ VisitForValue(args->at(i), kStack);
}
// Call the construct call builtin that handles allocation and
@@ -1239,7 +1323,7 @@
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
// Replace function on TOS with result in rax, or pop it.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context_, rax);
}
@@ -1257,8 +1341,7 @@
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- Visit(args->at(i));
- ASSERT_EQ(Expression::kValue, args->at(i)->context());
+ VisitForValue(args->at(i), kStack);
}
if (expr->is_jsruntime()) {
@@ -1269,82 +1352,10 @@
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context_, rax);
} else {
__ CallRuntime(expr->function(), arg_count);
- Move(expr->context(), rax);
- }
-}
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
-
- Visit(proxy);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kValue: // Fall through
- case Expression::kTest: // Fall through
- case Expression::kTestValue: // Fall through
- case Expression::kValueTest:
- // Duplicate the result on the stack.
- __ push(rax);
- break;
- case Expression::kEffect:
- // Do not save result.
- break;
- }
- // Call runtime for +1/-1.
- __ push(rax);
- __ Push(Smi::FromInt(1));
- if (expr->op() == Token::INC) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- __ CallRuntime(Runtime::kNumberSub, 2);
- }
- // Call Store IC.
- __ Move(rcx, proxy->AsVariable()->name());
- __ push(CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Restore up stack after store IC
- __ addq(rsp, Immediate(kPointerSize));
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through
- case Expression::kValue:
- // Do nothing. Result in either on the stack for value context
- // or discarded for effect context.
- break;
- case Expression::kTest:
- __ pop(rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
+ Apply(context_, rax);
}
}
@@ -1353,22 +1364,35 @@
switch (expr->op()) {
case Token::VOID: {
Comment cmnt(masm_, "[ UnaryOperation (VOID)");
- Visit(expr->expression());
- ASSERT_EQ(Expression::kEffect, expr->expression()->context());
- switch (expr->context()) {
+ VisitForEffect(expr->expression());
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
case Expression::kEffect:
break;
case Expression::kValue:
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ break;
+ case kStack:
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ break;
+ }
break;
case Expression::kTestValue:
// Value is false so it's needed.
- __ PushRoot(Heap::kUndefinedValueRootIndex);
+ switch (location_) {
+ case kAccumulator:
+ __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex);
+ break;
+ case kStack:
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ break;
+ }
// Fall through.
- case Expression::kTest: // Fall through.
+ case Expression::kTest:
case Expression::kValueTest:
__ jmp(false_label_);
break;
@@ -1378,70 +1402,39 @@
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
- ASSERT_EQ(Expression::kTest, expr->expression()->context());
-
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- switch (expr->context()) {
+ Label materialize_true, materialize_false, done;
+ // Initially assume a pure test context. Notice that the labels are
+ // swapped.
+ Label* if_true = false_label_;
+ Label* if_false = true_label_;
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_false;
- false_label_ = &push_true;
- Visit(expr->expression());
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ bind(&done);
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
- Visit(expr->expression());
- __ bind(&done);
+ if_true = &done;
+ if_false = &done;
break;
-
+ case Expression::kValue:
+ if_true = &materialize_false;
+ if_false = &materialize_true;
+ break;
case Expression::kTest:
- true_label_ = saved_false;
- false_label_ = saved_true;
- Visit(expr->expression());
break;
-
case Expression::kValueTest:
- true_label_ = saved_false;
- false_label_ = &push_true;
- Visit(expr->expression());
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(saved_true);
+ if_false = &materialize_true;
break;
-
case Expression::kTestValue:
- true_label_ = &push_false;
- false_label_ = saved_true;
- Visit(expr->expression());
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ jmp(saved_false);
+ if_true = &materialize_false;
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
+ VisitForControl(expr->expression(), if_true, if_false);
+ Apply(context_, if_false, if_true); // Labels swapped.
break;
}
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- ASSERT_EQ(Expression::kValue, expr->expression()->context());
-
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL &&
!proxy->var()->is_this() &&
@@ -1463,11 +1456,11 @@
__ push(rax);
} else {
// This expression cannot throw a reference error at the top level.
- Visit(expr->expression());
+ VisitForValue(expr->expression(), kStack);
}
__ CallRuntime(Runtime::kTypeof, 1);
- Move(expr->context(), rax);
+ Apply(context_, rax);
break;
}
@@ -1477,13 +1470,142 @@
}
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property, whether the key is a property name
+ // distinguishes a named from a keyed access.
+ if (prop != NULL) {
+ assign_type =
+ (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ Location saved_location = location_;
+ location_ = kStack;
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ location_ = saved_location;
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && context_ != Expression::kEffect) {
+ __ Push(Smi::FromInt(0));
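+ // The slot is overwritten below once the old value is known.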
+ }
+ VisitForValue(prop->obj(), kStack);
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop);
+ } else {
+ VisitForValue(prop->key(), kStack);
+ EmitKeyedPropertyLoad(prop);
+ }
+ __ push(rax);
+ }
+
+ // Convert to number.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
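+ // The TO_NUMBER builtin returns its result in rax.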
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (context_) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ // Save the result on the stack. For a named property it is stored
+ // under the receiver; for a keyed property, under the key and the
+ // receiver that are currently on top of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(rax);
+ break;
+ case NAMED_PROPERTY:
+ __ movq(Operand(rsp, kPointerSize), rax);
+ break;
+ case KEYED_PROPERTY:
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ break;
+ }
+ break;
+ }
+ }
+
+ // Call stub for +1/-1.
+ __ push(rax);
+ __ Push(Smi::FromInt(1));
+ GenericBinaryOpStub stub(expr->binary_op(),
+ NO_OVERWRITE,
+ NO_GENERIC_BINARY_FLAGS);
+ __ CallStub(&stub);
+
+ // Store the value returned in rax.
+ switch (assign_type) {
+ case VARIABLE:
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect: We have the result on
+ // top of the stack.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ context_);
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(1, context_, rax);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (context_ != Expression::kEffect) {
+ ApplyTOS(context_);
+ }
+ } else {
+ DropAndApply(2, context_, rax);
+ }
+ break;
+ }
+ }
+}
+
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
case Token::COMMA:
- ASSERT_EQ(Expression::kEffect, expr->left()->context());
- ASSERT_EQ(expr->context(), expr->right()->context());
- Visit(expr->left());
+ VisitForEffect(expr->left());
Visit(expr->right());
break;
@@ -1502,20 +1624,12 @@
case Token::BIT_XOR:
case Token::SHL:
case Token::SHR:
- case Token::SAR: {
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
-
- Visit(expr->left());
- Visit(expr->right());
- GenericBinaryOpStub stub(expr->op(),
- NO_OVERWRITE,
- NO_GENERIC_BINARY_FLAGS);
- __ CallStub(&stub);
- Move(expr->context(), rax);
-
+ case Token::SAR:
+ VisitForValue(expr->left(), kStack);
+ VisitForValue(expr->right(), kAccumulator);
+ EmitBinaryOp(expr->op(), context_);
break;
- }
+
default:
UNREACHABLE();
}
@@ -1524,95 +1638,85 @@
void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
- ASSERT_EQ(Expression::kValue, expr->left()->context());
- ASSERT_EQ(Expression::kValue, expr->right()->context());
- Visit(expr->left());
- Visit(expr->right());
- // Convert current context to test context: Pre-test code.
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- switch (expr->context()) {
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label materialize_true, materialize_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
+ switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_true;
- false_label_ = &push_false;
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
+ if_true = &done;
+ if_false = &done;
break;
-
+ case Expression::kValue:
+ if_true = &materialize_true;
+ if_false = &materialize_false;
+ break;
case Expression::kTest:
break;
-
case Expression::kValueTest:
- true_label_ = &push_true;
+ if_true = &materialize_true;
break;
-
case Expression::kTestValue:
- false_label_ = &push_false;
+ if_false = &materialize_false;
break;
}
- // Convert current context to test context: End pre-test code.
+ VisitForValue(expr->left(), kStack);
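+ // The right operand is visited per operator below: IN and INSTANCEOF
+ // need it on the stack, the generic comparisons take it in the accumulator.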
switch (expr->op()) {
- case Token::IN: {
+ case Token::IN:
+ VisitForValue(expr->right(), kStack);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, true_label_);
- __ jmp(false_label_);
+ __ j(equal, if_true);
+ __ jmp(if_false);
break;
- }
case Token::INSTANCEOF: {
+ VisitForValue(expr->right(), kStack);
InstanceofStub stub;
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(zero, true_label_); // The stub returns 0 for true.
- __ jmp(false_label_);
+ __ j(zero, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
break;
}
default: {
+ VisitForValue(expr->right(), kAccumulator);
Condition cc = no_condition;
bool strict = false;
switch (expr->op()) {
case Token::EQ_STRICT:
strict = true;
- // Fall through
+ // Fall through.
case Token::EQ:
cc = equal;
- __ pop(rax);
__ pop(rdx);
break;
case Token::LT:
cc = less;
- __ pop(rax);
__ pop(rdx);
break;
case Token::GT:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = less;
- __ pop(rdx);
+ __ movq(rdx, result_register());
__ pop(rax);
break;
case Token::LTE:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = greater_equal;
- __ pop(rdx);
+ __ movq(rdx, result_register());
__ pop(rax);
break;
case Token::GTE:
cc = greater_equal;
- __ pop(rax);
__ pop(rdx);
break;
case Token::IN:
@@ -1626,61 +1730,78 @@
Label slow_case;
__ JumpIfNotBothSmi(rax, rdx, &slow_case);
__ SmiCompare(rdx, rax);
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
__ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
- // Convert current context to test context: Post-test code.
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- break;
-
- case Expression::kValue:
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ bind(&done);
- break;
-
- case Expression::kEffect:
- __ bind(&done);
- break;
-
- case Expression::kTest:
- break;
-
- case Expression::kValueTest:
- __ bind(&push_true);
- __ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(saved_true);
- break;
-
- case Expression::kTestValue:
- __ bind(&push_false);
- __ PushRoot(Heap::kFalseValueRootIndex);
- __ jmp(saved_false);
- break;
- }
- true_label_ = saved_true;
- false_label_ = saved_false;
- // Convert current context to test context: End post-test code.
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
+ Apply(context_, if_true, if_false);
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), rax);
+ Apply(context_, rax);
+}
+
+
+Register FastCodeGenerator::result_register() { return rax; }
+
+
+Register FastCodeGenerator::context_register() { return rsi; }
+
+
+void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+ ASSERT(IsAligned(frame_offset, kPointerSize));
+ __ movq(Operand(rbp, frame_offset), value);
+}
+
+
+void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+ __ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+
+void FastCodeGenerator::EnterFinallyBlock() {
+ ASSERT(!result_register().is(rdx));
+ ASSERT(!result_register().is(rcx));
+ // Cook return address on top of stack (smi encoded Code* delta)
+ __ movq(rdx, Operand(rsp, 0));
+ __ Move(rcx, masm_->CodeObject());
+ __ subq(rdx, rcx);
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(Operand(rsp, 0), rdx);
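+ // The stack slot now holds a smi-tagged offset into the code object,
+ // which stays valid even if the GC moves the code.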
+ // Store result register while executing finally block.
+ __ push(result_register());
+}
+
+
+void FastCodeGenerator::ExitFinallyBlock() {
+ ASSERT(!result_register().is(rdx));
+ ASSERT(!result_register().is(rcx));
+ // Restore result register from stack.
+ __ pop(result_register());
+ // Uncook return address.
+ __ movq(rdx, Operand(rsp, 0));
+ __ SmiToInteger32(rdx, rdx);
+ __ Move(rcx, masm_->CodeObject());
+ __ addq(rdx, rcx);
+ __ movq(Operand(rsp, 0), rdx);
+ // And return.
+ __ ret(0);
}
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index ccbc615..457ece5 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -48,9 +48,13 @@
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
- Register r0, Register r1, Register r2,
- Register name) {
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss_label,
+ Register r0,
+ Register r1,
+ Register r2,
+ Register name,
+ DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
@@ -86,10 +90,14 @@
__ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
__ j(equal, miss_label);
- // Check that the properties array is a dictionary.
+ // Load properties array.
__ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
- __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
- __ j(not_equal, miss_label);
+
+ if (check_dictionary == CHECK_DICTIONARY) {
+ // Check that the properties array is a dictionary.
+ __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
+ __ j(not_equal, miss_label);
+ }
// Compute the capacity mask.
const int kCapacityOffset =
@@ -246,7 +254,8 @@
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, check_string, index_int, index_string, check_pixel_array;
+ Label slow, check_string, index_int, index_string;
+ Label check_pixel_array, probe_dictionary;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
@@ -319,14 +328,69 @@
__ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
- // If the string is a symbol, do a quick inline probe of the receiver's
- // dictionary, if it exists.
+ // Is the string a symbol?
__ j(not_zero, &index_string); // The value in rbx is used at jump target.
+ ASSERT(kSymbolTag != 0);
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
- // Probe the dictionary leaving result in rcx.
- GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary leaving result in rcx.
+ __ movq(rbx, FieldOperand(rcx, JSObject::kPropertiesOffset));
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), Factory::hash_table_map());
+ __ j(equal, &probe_dictionary);
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movl(rdx, rbx);
+ __ shr(rdx, Immediate(KeyedLookupCache::kMapHashShift));
+ __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+ __ shr(rax, Immediate(String::kHashShift));
+ __ xor_(rdx, rax);
+ __ and_(rdx, Immediate(KeyedLookupCache::kCapacityMask));
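+ // rdx now holds the index into the keyed lookup cache.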
+
+ // Load the key (consisting of map and symbol) from the cache and
+ // check for match.
+ ExternalReference cache_keys
+ = ExternalReference::keyed_lookup_cache_keys();
+ __ movq(rdi, rdx);
+ __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
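+ // Each cache entry is a (map, symbol) pair, i.e. two pointers per index.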
+ __ movq(kScratchRegister, cache_keys);
+ __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
+ __ j(not_equal, &slow);
+ __ movq(rdi, Operand(kScratchRegister, rdi, times_1, kPointerSize));
+ __ cmpq(Operand(rsp, kPointerSize), rdi);
+ __ j(not_equal, &slow);
+
+ // Get field offset which is a 32-bit integer and check that it is
+ // an in-object property.
+ ExternalReference cache_field_offsets
+ = ExternalReference::keyed_lookup_cache_field_offsets();
+ __ movq(kScratchRegister, cache_field_offsets);
+ __ movl(rax, Operand(kScratchRegister, rdx, times_4, 0));
+ __ movzxbq(rdx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ cmpq(rax, rdx);
+ __ j(above_equal, &slow);
+
+ // Load in-object property.
+ __ subq(rax, rdx);
+ __ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addq(rax, rdx);
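+ // rax is now the slot index from the start of the object; in-object
+ // properties are stored at the end of the object.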
+ __ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
+ __ ret(0);
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ GenerateDictionaryLoad(masm,
+ &slow,
+ rbx,
+ rcx,
+ rdx,
+ rax,
+ DICTIONARY_CHECK_DONE);
GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
@@ -345,6 +409,16 @@
}
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ GenerateGeneric(masm);
+}
+
+
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
@@ -853,9 +927,7 @@
}
-void CallIC::Generate(MacroAssembler* masm,
- int argc,
- ExternalReference const& f) {
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
// Get the receiver of the function from the stack; 1 ~ return address.
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Get the name of the function to call from the stack.
@@ -872,7 +944,7 @@
// Call the entry.
CEntryStub stub(1);
__ movq(rax, Immediate(2));
- __ movq(rbx, f);
+ __ movq(rbx, ExternalReference(IC_Utility(kCallIC_Miss)));
__ CallStub(&stub);
// Move result to rdi and exit the internal frame.
@@ -910,7 +982,7 @@
// rsp[16] argument argc - 1
// ...
// rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = reciever
+ // rsp[(argc + 1) * 8] argument 0 = receiver
// rsp[(argc + 2) * 8] function name
// -----------------------------------
Label number, non_number, non_string, boolean, probe, miss;
@@ -963,7 +1035,7 @@
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
@@ -971,8 +1043,8 @@
int argc,
bool is_global_object,
Label* miss) {
- // Search dictionary - put result in register edx.
- GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
+ // Search dictionary - put result in register rdx.
+ GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
@@ -1006,7 +1078,7 @@
// rsp[16] argument argc - 1
// ...
// rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = reciever
+ // rsp[(argc + 1) * 8] argument 0 = receiver
// rsp[(argc + 2) * 8] function name
// -----------------------------------
@@ -1065,7 +1137,7 @@
// Cache miss: Jump to runtime.
__ bind(&miss);
- Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+ GenerateMiss(masm, argc);
}
@@ -1196,9 +1268,9 @@
Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss);
- // Search the dictionary placing the result in eax.
+ // Search the dictionary placing the result in rax.
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
+ GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
__ ret(0);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 7115791..65a408b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -310,6 +310,12 @@
}
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
@@ -575,6 +581,17 @@
}
+Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
+ if (first.is(second)) {
+ return CheckSmi(first);
+ }
+ movl(kScratchRegister, first);
+ andl(kScratchRegister, second);
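+ // The smi tag bit of the AND is clear if either value is a smi.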
+ testb(kScratchRegister, Immediate(kSmiTagMask));
+ return zero;
+}
+
+
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
movq(kScratchRegister, src);
@@ -1275,6 +1292,39 @@
}
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_fail) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringBits =
+ kNotStringTag | kSeqStringTag | kAsciiStringTag;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringBits + (kFlatAsciiStringBits << 3)));
+ j(not_equal, on_fail);
+}
+
+
void MacroAssembler::Move(Register dst, Handle<Object> source) {
ASSERT(!source->IsFailure());
if (source->IsSmi()) {
@@ -1339,6 +1389,13 @@
}
+void MacroAssembler::Drop(int stack_elements) {
+ if (stack_elements > 0) {
+ addq(rsp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
void MacroAssembler::Test(const Operand& src, Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
@@ -1425,6 +1482,16 @@
}
+void MacroAssembler::PopTryHandler() {
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+ // Unlink this handler.
+ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ pop(Operand(kScratchRegister, 0));
+ // Remove the remaining fields.
+ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+}
+
+
void MacroAssembler::Ret() {
ret(0);
}
@@ -2244,12 +2311,114 @@
}
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kShortSize == 2);
+ // scratch1 = length * 2 + kObjectAlignmentMask.
+ lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
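+ // scratch1 = byte size of the characters, rounded up to object alignment.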
+
+ // Allocate two byte string in new space.
+ AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
+ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movl(FieldOperand(result, String::kLengthOffset), length);
+ movl(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string while
+ // observing object alignment.
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ movl(scratch1, length);
+ ASSERT(kCharSize == 1);
+ addq(scratch1, Immediate(kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
+
+ // Allocate ascii string in new space.
+ AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ times_1,
+ scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
+ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+ movl(FieldOperand(result, String::kLengthOffset), length);
+ movl(FieldOperand(result, String::kHashFieldOffset),
+ Immediate(String::kEmptyHashField));
+}
+
+
+void MacroAssembler::AllocateConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate cons string in new space.
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
+ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ // Allocate ascii cons string in new space.
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map. The other fields are left uninitialized.
+ LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
+ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
// Load the function context (which is the incoming, outer context).
- movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+ movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
for (int i = 1; i < context_chain_length; i++) {
movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 9e7c25c..ce2848c 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -38,6 +38,9 @@
// function calling convention.
static const Register kScratchRegister = r10;
+// Convenience for platform-independent signatures.
+typedef Operand MemOperand;
+
// Forward declaration.
class JumpTarget;
@@ -201,9 +204,12 @@
// Is the value a positive tagged smi.
Condition CheckPositiveSmi(Register src);
- // Are both values are tagged smis.
+ // Are both values tagged smis.
Condition CheckBothSmi(Register first, Register second);
+ // Is either value a tagged smi.
+ Condition CheckEitherSmi(Register first, Register second);
+
// Is the value the minimum smi value (since we are using
// two's complement numbers, negating the value is known to yield
// a non-smi value).
@@ -400,7 +406,15 @@
void Test(const Operand& dst, Smi* source);
// ---------------------------------------------------------------------------
- // Macro instructions
+ // String macros.
+ void JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ Label* on_not_both_flat_ascii);
+
+ // ---------------------------------------------------------------------------
+ // Macro instructions.
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
@@ -413,6 +427,12 @@
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the rsp register.
+ void Drop(int stack_elements);
+
+ void Call(Label* target) { call(target); }
+
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
@@ -443,6 +463,8 @@
// address must be pushed before calling this helper.
void PushTryHandler(CodeLocation try_location, HandlerType type);
+ // Unlink the stack handler on top of the stack from the try handler chain.
+ void PopTryHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -518,6 +540,32 @@
Register scratch,
Label* gc_required);
+ // Allocate a sequential string. All the header fields of the string object
+ // are initialized.
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+
+ // Allocate a raw cons string object. Only the map field of the result is
+ // initialized.
+ void AllocateConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Support functions.
@@ -557,6 +605,9 @@
// Call a code stub.
void CallStub(CodeStub* stub);
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub);
+
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 639f5e9..75bbf3e 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -60,20 +60,24 @@
* - r8 : code object pointer. Used to convert between absolute and
* code-object-relative addresses.
*
- * The registers rax, rbx, rcx, r9 and r11 are free to use for computations.
+ * The registers rax, rbx, r9 and r11 are free to use for computations.
* If changed to use r12+, they should be saved as callee-save registers.
*
* Each call to a C++ method should retain these registers.
*
* The stack will have the following content, in some order, indexable from the
* frame pointer (see, e.g., kStackHighEnd):
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - at_start (if 1, start at start of string, if 0, don't)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - String** input_string (location of a handle containing the string)
+ * - direct_call (if 1, direct call from JavaScript code, if 0 call
+ * through the runtime system)
+ * - stack_area_base (High end of the memory area to use as
+ * backtracking stack)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
+ * - start index (character index of start)
+ * - String* input_string (input string)
* - return address
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - Offset of location before start of input (effectively character
@@ -90,11 +94,13 @@
* calling the code's entry address cast to a function pointer with the
* following signature:
* int (*match)(String* input_string,
+ * int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
- * byte* stack_area_base)
+ * byte* stack_area_base,
+ * bool direct_call)
*/
#define __ ACCESS_MASM(masm_)
@@ -490,27 +496,22 @@
bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
+ // (c - min) <= (max - min) check, using the sequence:
+ // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // cmp(rax, Immediate(max - min))
switch (type) {
case 's':
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
Label success;
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success);
// Check range 0x09..0x0d
- __ subl(current_character(), Immediate('\t'));
- __ cmpl(current_character(), Immediate('\r' - '\t'));
+ __ lea(rax, Operand(current_character(), -'\t'));
+ __ cmpl(rax, Immediate('\r' - '\t'));
BranchOrBacktrack(above, on_no_match);
__ bind(&success);
return true;
@@ -518,72 +519,105 @@
return false;
case 'S':
// Match non-space characters.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmpl(current_character(), Immediate(' '));
BranchOrBacktrack(equal, on_no_match);
- __ subl(current_character(), Immediate('\t'));
- __ cmpl(current_character(), Immediate('\r' - '\t'));
+ __ lea(rax, Operand(current_character(), -'\t'));
+ __ cmpl(rax, Immediate('\r' - '\t'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
}
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ subl(current_character(), Immediate('0'));
- __ cmpl(current_character(), Immediate('9' - '0'));
+ __ lea(rax, Operand(current_character(), -'0'));
+ __ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ subl(current_character(), Immediate('0'));
- __ cmpl(current_character(), Immediate('9' - '0'));
+ __ lea(rax, Operand(current_character(), -'0'));
+ __ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ xor_(current_character(), Immediate(0x01));
+ __ movl(rax, current_character());
+ __ xor_(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(current_character(), Immediate(0x0b));
- __ cmpl(current_character(), Immediate(0x0c - 0x0b));
+ __ subl(rax, Immediate(0x0b));
+ __ cmpl(rax, Immediate(0x0c - 0x0b));
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(current_character(), Immediate(0x2028 - 0x0b));
- __ cmpl(current_character(), Immediate(1));
+ __ subl(rax, Immediate(0x2028 - 0x0b));
+ __ cmpl(rax, Immediate(0x2029 - 0x2028));
BranchOrBacktrack(below_equal, on_no_match);
}
return true;
}
- case '*':
- // Match any character.
- if (check_offset) {
- CheckPosition(cp_offset, on_no_match);
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ movl(rax, current_character());
+ __ xor_(rax, Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ subl(rax, Immediate(0x0b));
+ __ cmpl(rax, Immediate(0x0c - 0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(above, on_no_match);
+ } else {
+ Label done;
+ BranchOrBacktrack(below_equal, &done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ subl(rax, Immediate(0x2028 - 0x0b));
+ __ cmpl(rax, Immediate(0x2029 - 0x2028));
+ BranchOrBacktrack(above, on_no_match);
+ __ bind(&done);
}
return true;
- // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ }
+ case 'w': {
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmpl(current_character(), Immediate('z'));
+ BranchOrBacktrack(above, on_no_match);
+ }
+ __ movq(rbx, ExternalReference::re_word_character_map());
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ __ testb(Operand(rbx, current_character(), times_1, 0),
+ current_character());
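+ // Table entries are non-zero exactly for word characters.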
+ BranchOrBacktrack(zero, on_no_match);
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != ASCII) {
+ // Table is 128 entries, so all ASCII characters can be tested.
+ __ cmpl(current_character(), Immediate('z'));
+ __ j(above, &done);
+ }
+ __ movq(rbx, ExternalReference::re_word_character_map());
+ ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
+ __ testb(Operand(rbx, current_character(), times_1, 0),
+ current_character());
+ BranchOrBacktrack(not_zero, on_no_match);
+ if (mode_ != ASCII) {
+ __ bind(&done);
+ }
+ return true;
+ }
+
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 3e6720d..694cba0 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -73,8 +73,6 @@
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
@@ -143,6 +141,8 @@
// AtStart is passed as 32 bit int (values 0 or 1).
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
+ // DirectCall is passed as 32 bit int (values 0 or 1).
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
@@ -154,6 +154,7 @@
static const int kRegisterOutput = kInputEnd - kPointerSize;
static const int kAtStart = kRegisterOutput - kPointerSize;
static const int kStackHighEnd = kFrameAlign;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
#endif
#ifdef _WIN64
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index c4f3a85..015ba13 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -53,9 +53,9 @@
};
// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 55b0b87..8d600a5 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -688,7 +688,7 @@
// rsp[16] argument argc - 1
// ...
// rsp[argc * 8] argument 1
- // rsp[(argc + 1) * 8] argument 0 = reciever
+ // rsp[(argc + 1) * 8] argument 0 = receiver
// rsp[(argc + 2) * 8] function name
Label miss;
@@ -721,47 +721,62 @@
break;
case STRING_CHECK:
- // Check that the object is a two-byte string or a symbol.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::STRING_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
- rbx, rdx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ // Check that the object is a two-byte string or a symbol.
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
+ __ j(above_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ rcx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rbx, rdx, name, &miss);
+ }
break;
case NUMBER_CHECK: {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::NUMBER_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
- rbx, rdx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(rdx, &fast);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ rcx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rbx, rdx, name, &miss);
+ }
break;
}
case BOOLEAN_CHECK: {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateLoadGlobalFunctionPrototype(masm(),
- Context::BOOLEAN_FUNCTION_INDEX,
- rcx);
- CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
- rbx, rdx, name, &miss);
+ if (!function->IsBuiltin()) {
+ // Calling non-builtins with a value as receiver requires boxing.
+ __ jmp(&miss);
+ } else {
+ Label fast;
+ // Check that the object is a boolean.
+ __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
+ __ j(equal, &fast);
+ __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ rcx);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+ rbx, rdx, name, &miss);
+ }
break;
}
@@ -956,8 +971,24 @@
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- __ Cmp(rdi, Handle<JSFunction>(function));
- __ j(not_equal, &miss);
+ if (Heap::InNewSpace(function)) {
+ // We can't embed a pointer to a function in new space so we have
+ // to verify that the shared function info is unchanged. This has
+ // the nice side effect that multiple closures based on the same
+ // function can all use this call IC. Before we load through the
+ // function, we have to verify that it still is a function.
+ __ JumpIfSmi(rdi, &miss);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(not_equal, &miss);
+
+ // Check the shared function info. Make sure it hasn't changed.
+ __ Move(rcx, Handle<SharedFunctionInfo>(function->shared()));
+ __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rcx);
+ __ j(not_equal, &miss);
+ } else {
+ __ Cmp(rdi, Handle<JSFunction>(function));
+ __ j(not_equal, &miss);
+ }
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
@@ -987,10 +1018,10 @@
}
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+Object* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
JSObject* holder,
- AccessorInfo* callback,
- String* name) {
+ AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- rcx : name
// -- rsp[0] : return address
@@ -999,8 +1030,11 @@
Label miss;
__ movq(rax, Operand(rsp, kPointerSize));
- GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
- callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1154,8 +1188,11 @@
__ Cmp(rax, Handle<String>(name));
__ j(not_equal, &miss);
- GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
- callback, name, &miss);
+ Failure* failure = Failure::InternalError();
+ bool success = GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+ callback, name, &miss, &failure);
+ if (!success) return failure;
+
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1610,7 +1647,7 @@
}
-void StubCompiler::GenerateLoadCallback(JSObject* object,
+bool StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@@ -1618,7 +1655,8 @@
Register scratch2,
AccessorInfo* callback,
String* name,
- Label* miss) {
+ Label* miss,
+ Failure** failure) {
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, miss);
@@ -1641,6 +1679,8 @@
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
+
+ return true;
}
@@ -1802,17 +1842,15 @@
// depending on the this.x = ...; assignment in the function.
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed;
- // Set the property to undefined.
- __ movq(Operand(r9, i * kPointerSize), r8);
// Check if the argument assigned to the property is actually passed.
+ // If the argument is not passed, the property is set to undefined;
+ // otherwise it is found on the stack.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ movq(rbx, r8);
__ cmpq(rax, Immediate(arg_number));
- __ j(below_equal, ¬_passed);
- // Argument passed - find it on the stack.
- __ movq(rbx, Operand(rcx, arg_number * -kPointerSize));
+ __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
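+ // cmovq avoids a branch: rbx keeps undefined unless the argument was passed.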
+ // Store value in the property.
__ movq(Operand(r9, i * kPointerSize), rbx);
- __ bind(¬_passed);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index fe65d34..6e84ed1 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -129,11 +129,29 @@
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ if (count == 1) {
+ __ Push(undefined);
+ } else if (count < kLocalVarBound) {
+ // For fewer locals the unrolled loop is more compact.
+ __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ for (int i = 0; i < count; i++) {
+ __ push(kScratchRegister);
+ }
+ } else {
+ // For more locals a loop in generated code is more compact.
+ Label alloc_locals_loop;
+ Result cnt = cgen()->allocator()->Allocate();
+ ASSERT(cnt.is_valid());
+ __ movq(cnt.reg(), Immediate(count));
+ __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ __ bind(&alloc_locals_loop);
+ __ push(kScratchRegister);
+ __ decl(cnt.reg());
+ __ j(not_zero, &alloc_locals_loop);
+ }
for (int i = 0; i < count; i++) {
elements_.Add(initial_value);
stack_pointer_++;
- __ push(kScratchRegister);
}
}
}
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index e492305..8e3e40f 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -200,6 +200,9 @@
// shared return site. Emits code for spills.
void PrepareForReturn();
+ // Number of local variables above which a loop is used for allocating.
+ static const int kLocalVarBound = 7;
+
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@@ -340,7 +343,7 @@
// of the frame. Key and receiver are not dropped.
Result CallKeyedStoreIC();
- // Call call IC. Arguments, reciever, and function name are found
+ // Call call IC. Arguments, receiver, and function name are found
// on top of the frame. Function name slot is not dropped. The
// argument count does not include the receiver.
Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 6d6c174..7e67c00 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -447,6 +447,40 @@
}
+THREADED_TEST(ScavengeExternalString) {
+ TestResource::dispose_count = 0;
+ {
+ v8::HandleScope scope;
+ uint16_t* two_byte_string = AsciiToTwoByteString("test string");
+ Local<String> string =
+ String::NewExternal(new TestResource(two_byte_string));
+ i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+ CHECK(i::Heap::InNewSpace(*istring));
+ CHECK_EQ(0, TestResource::dispose_count);
+ }
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+ CHECK_EQ(1, TestResource::dispose_count);
+}
+
+
+THREADED_TEST(ScavengeExternalAsciiString) {
+ TestAsciiResource::dispose_count = 0;
+ {
+ v8::HandleScope scope;
+ const char* one_byte_string = "test string";
+ Local<String> string = String::NewExternal(
+ new TestAsciiResource(i::StrDup(one_byte_string)));
+ i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+ CHECK(i::Heap::InNewSpace(*istring));
+ CHECK_EQ(0, TestAsciiResource::dispose_count);
+ }
+ i::Heap::CollectGarbage(0, i::NEW_SPACE);
+ CHECK_EQ(1, TestAsciiResource::dispose_count);
+}
+
+
THREADED_TEST(StringConcat) {
{
v8::HandleScope scope;
@@ -2754,6 +2788,10 @@
static v8::Handle<Value> CallFun(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
+ if (args.IsConstructCall()) {
+ args.This()->Set(v8_str("data"), args.Data());
+ return v8::Null();
+ }
return args.Data();
}
@@ -2795,6 +2833,21 @@
}
+THREADED_TEST(NativeFunctionConstructCall) {
+ v8::RegisterExtension(new FunctionExtension());
+ v8::HandleScope handle_scope;
+ static const char* exts[1] = { "functiontest" };
+ v8::ExtensionConfiguration config(1, exts);
+ LocalContext context(&config);
+ CHECK_EQ(v8::Integer::New(8),
+ Script::Compile(v8_str("(new A()).data"))->Run());
+ CHECK_EQ(v8::Integer::New(7),
+ Script::Compile(v8_str("(new B()).data"))->Run());
+ CHECK_EQ(v8::Integer::New(6),
+ Script::Compile(v8_str("(new C()).data"))->Run());
+}
+
+
static const char* last_location;
static const char* last_message;
void StoringErrorCallback(const char* location, const char* message) {
@@ -4861,8 +4914,7 @@
CHECK_EQ(17, value->Int32Value());
// Check that the call-as-function handler can be called through
- // new. Currently, there is no way to check in the call-as-function
- // handler if it has been called through new or not.
+ // new.
value = CompileRun("new obj(43)");
CHECK(!try_catch.HasCaught());
CHECK_EQ(-43, value->Int32Value());
@@ -6708,6 +6760,27 @@
v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK_NE(sd->Length(), 0);
CHECK_NE(sd->Data(), NULL);
+ CHECK(!sd->HasError());
+ delete sd;
+}
+
+
+TEST(PreCompileWithError) {
+ v8::V8::Initialize();
+ const char *script = "function foo(a) { return 1 * * 2; }";
+ v8::ScriptData *sd =
+ v8::ScriptData::PreCompile(script, i::StrLength(script));
+ CHECK(sd->HasError());
+ delete sd;
+}
+
+
+TEST(Regress31661) {
+ v8::V8::Initialize();
+ const char *script = " The Definintive Guide";
+ v8::ScriptData *sd =
+ v8::ScriptData::PreCompile(script, i::StrLength(script));
+ CHECK(sd->HasError());
delete sd;
}
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 5b72193..cd0da1b 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -443,6 +443,9 @@
// Check that the debugger has been fully unloaded.
static void CheckDebuggerUnloaded(bool check_functions = false) {
+ // Let the debugger unload itself synchronously.
+ v8::Debug::ProcessDebugMessages();
+
v8::internal::CheckDebuggerUnloaded(check_functions);
}
@@ -2160,6 +2163,155 @@
CheckDebuggerUnloaded();
}
+// Copies a C string to a 16-bit string. Does not check for buffer overflow.
+// Does not use the V8 engine to convert strings, so it can be used
+// in any thread. Returns the length of the string.
+int AsciiToUtf16(const char* input_buffer, uint16_t* output_buffer) {
+ int i;
+ for (i = 0; input_buffer[i] != '\0'; ++i) {
+ // ASCII does not use chars > 127, but be careful anyway.
+ output_buffer[i] = static_cast<unsigned char>(input_buffer[i]);
+ }
+ output_buffer[i] = 0;
+ return i;
+}
+
+// Copies a 16-bit string to a C string by dropping the high byte of
+// each character. Does not check for buffer overflow.
+// Can be used in any thread. Requires string length as an input.
+int Utf16ToAscii(const uint16_t* input_buffer, int length,
+ char* output_buffer, int output_len = -1) {
+ if (output_len >= 0) {
+ if (length > output_len - 1) {
+ length = output_len - 1;
+ }
+ }
+
+ for (int i = 0; i < length; ++i) {
+ output_buffer[i] = static_cast<char>(input_buffer[i]);
+ }
+ output_buffer[length] = '\0';
+ return length;
+}
+
+
+// We match parts of the message to extract the evaluate result value.
+bool GetEvaluateStringResult(char *message, char* buffer, int buffer_size) {
+ const char* value = "\"value\":";
+ char* pos = strstr(message, value);
+ if (pos == NULL) {
+ return false;
+ }
+ Vector<char> buf(buffer, buffer_size);
+ OS::StrNCpy(buf, pos, buffer_size);
+ buffer[buffer_size - 1] = '\0';
+ return true;
+}
+
+
+struct EvaluateResult {
+ static const int kBufferSize = 20;
+ char buffer[kBufferSize];
+};
+
+struct DebugProcessDebugMessagesData {
+ static const int kArraySize = 5;
+ int counter;
+ EvaluateResult results[kArraySize];
+
+ void reset() {
+ counter = 0;
+ }
+ EvaluateResult* current() {
+ return &results[counter % kArraySize];
+ }
+ void next() {
+ counter++;
+ }
+};
+
+DebugProcessDebugMessagesData process_debug_messages_data;
+
+static void DebugProcessDebugMessagesHandler(
+ const uint16_t* message,
+ int length,
+ v8::Debug::ClientData* client_data) {
+
+ const int kBufferSize = 100000;
+ char print_buffer[kBufferSize];
+ Utf16ToAscii(message, length, print_buffer, kBufferSize);
+
+ EvaluateResult* array_item = process_debug_messages_data.current();
+
+ bool res = GetEvaluateStringResult(print_buffer,
+ array_item->buffer,
+ EvaluateResult::kBufferSize);
+ if (res) {
+ process_debug_messages_data.next();
+ }
+}
+
+// Test that the evaluation of expressions works even from ProcessDebugMessages
+// i.e. with empty stack.
+TEST(DebugEvaluateWithoutStack) {
+ v8::Debug::SetMessageHandler(DebugProcessDebugMessagesHandler);
+
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ const char* source =
+ "var v1 = 'Pinguin';\n function getAnimal() { return 'Capy' + 'bara'; }";
+
+ v8::Script::Compile(v8::String::New(source))->Run();
+
+ v8::Debug::ProcessDebugMessages();
+
+ const int kBufferSize = 1000;
+ uint16_t buffer[kBufferSize];
+
+ const char* command_111 = "{\"seq\":111,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{"
+ " \"global\":true,"
+ " \"expression\":\"v1\",\"disable_break\":true"
+ "}}";
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(command_111, buffer));
+
+ const char* command_112 = "{\"seq\":112,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{"
+ " \"global\":true,"
+ " \"expression\":\"getAnimal()\",\"disable_break\":true"
+ "}}";
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(command_112, buffer));
+
+ const char* command_113 = "{\"seq\":113,"
+ "\"type\":\"request\","
+ "\"command\":\"evaluate\","
+ "\"arguments\":{"
+ " \"global\":true,"
+ " \"expression\":\"239 + 566\",\"disable_break\":true"
+ "}}";
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(command_113, buffer));
+
+ v8::Debug::ProcessDebugMessages();
+
+ CHECK_EQ(3, process_debug_messages_data.counter);
+
+ CHECK(strcmp("Pinguin", process_debug_messages_data.results[0].buffer));
+ CHECK(strcmp("Captbara", process_debug_messages_data.results[1].buffer));
+ CHECK(strcmp("805", process_debug_messages_data.results[2].buffer));
+
+ v8::Debug::SetMessageHandler(NULL);
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
// Simple test of the stepping mechanism using only store ICs.
TEST(DebugStepLinear) {
@@ -3141,6 +3293,39 @@
CheckDebuggerUnloaded();
}
+static const char* kSimpleExtensionSource =
+ "(function Foo() {"
+ " return 4;"
+ "})() ";
+
+// http://crbug.com/28933
+// Test that debug break is disabled when bootstrapper is active.
+TEST(NoBreakWhenBootstrapping) {
+ v8::HandleScope scope;
+
+ // Register a debug event listener which sets the break flag and counts.
+ v8::Debug::SetDebugEventListener(DebugEventCounter);
+
+ // Set the debug break flag.
+ v8::Debug::DebugBreak();
+ break_point_hit_count = 0;
+ {
+ // Create a context with an extension to make sure that some JavaScript
+ // code is executed during bootstrapping.
+ v8::RegisterExtension(new v8::Extension("simpletest",
+ kSimpleExtensionSource));
+ const char* extension_names[] = { "simpletest" };
+ v8::ExtensionConfiguration extensions(1, extension_names);
+ v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
+ context.Dispose();
+ }
+ // Check that no DebugBreak events occurred during the context creation.
+ CHECK_EQ(0, break_point_hit_count);
+
+ // Get rid of the debug event listener.
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
static v8::Handle<v8::Array> NamedEnum(const v8::AccessorInfo&) {
v8::Handle<v8::Array> result = v8::Array::New(3);
@@ -3557,31 +3742,6 @@
// Support classes
-// Copies a C string to a 16-bit string. Does not check for buffer overflow.
-// Does not use the V8 engine to convert strings, so it can be used
-// in any thread. Returns the length of the string.
-int AsciiToUtf16(const char* input_buffer, uint16_t* output_buffer) {
- int i;
- for (i = 0; input_buffer[i] != '\0'; ++i) {
- // ASCII does not use chars > 127, but be careful anyway.
- output_buffer[i] = static_cast<unsigned char>(input_buffer[i]);
- }
- output_buffer[i] = 0;
- return i;
-}
-
-// Copies a 16-bit string to a C string by dropping the high byte of
-// each character. Does not check for buffer overflow.
-// Can be used in any thread. Requires string length as an input.
-int Utf16ToAscii(const uint16_t* input_buffer, int length,
- char* output_buffer) {
- for (int i = 0; i < length; ++i) {
- output_buffer[i] = static_cast<char>(input_buffer[i]);
- }
- output_buffer[length] = '\0';
- return length;
-}
-
// Provides synchronization between k threads, where k is an input to the
// constructor. The Wait() call blocks a thread until it is called for the
// k'th time, then all calls return. Each ThreadBarrier object can only
@@ -5622,6 +5782,51 @@
}
+static int counting_message_handler_counter;
+
+static void CountingMessageHandler(const v8::Debug::Message& message) {
+ counting_message_handler_counter++;
+}
+
+// Test that debug messages get processed when ProcessDebugMessages is called.
+TEST(ProcessDebugMessages) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+
+ counting_message_handler_counter = 0;
+
+ v8::Debug::SetMessageHandler2(CountingMessageHandler);
+
+ const int kBufferSize = 1000;
+ uint16_t buffer[kBufferSize];
+ const char* scripts_command =
+ "{\"seq\":0,"
+ "\"type\":\"request\","
+ "\"command\":\"scripts\"}";
+
+ // Send scripts command.
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+
+ CHECK_EQ(0, counting_message_handler_counter);
+ v8::Debug::ProcessDebugMessages();
+  // At least one message should have arrived.
+ CHECK_GE(counting_message_handler_counter, 1);
+
+ counting_message_handler_counter = 0;
+
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ v8::Debug::SendCommand(buffer, AsciiToUtf16(scripts_command, buffer));
+ CHECK_EQ(0, counting_message_handler_counter);
+ v8::Debug::ProcessDebugMessages();
+  // At least two messages should have arrived.
+ CHECK_GE(counting_message_handler_counter, 2);
+
+ // Get rid of the debug message handler.
+ v8::Debug::SetMessageHandler2(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
TEST(GetMirror) {
v8::HandleScope scope;
DebugLocalContext env;
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index b8b3364..ba4eec2 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -57,7 +57,7 @@
TEST(DisasmIa320) {
InitializeVM();
v8::HandleScope scope;
- v8::internal::byte buffer[1024];
+ v8::internal::byte buffer[2048];
Assembler assm(buffer, sizeof buffer);
 DummyStaticFunction(NULL);  // Reference the function so it is not unused.
@@ -223,13 +223,16 @@
__ sub(Operand(ebx), Immediate(12));
__ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
+ __ subb(Operand(edx, ecx, times_4, 10000), 100);
+ __ subb(Operand(eax), 100);
+ __ subb(eax, Operand(edx, ecx, times_4, 10000));
__ xor_(ebx, 12345);
__ imul(edx, ecx, 12);
__ imul(edx, ecx, 1000);
-
+ __ rep_movs();
__ sub(edx, Operand(ebx, ecx, times_4, 10000));
__ sub(edx, Operand(ebx));
@@ -365,6 +368,12 @@
__ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
__ comisd(xmm0, xmm1);
+
+ // 128 bit move instructions.
+ __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
}
// cmov.
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 511b933..3b8905b 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -91,14 +91,14 @@
TEST(Smi) {
// Check that C++ Smi operations work as expected.
- intptr_t test_numbers[] = {
+ int64_t test_numbers[] = {
0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
- Smi::kMaxValue, static_cast<intptr_t>(Smi::kMaxValue) + 1,
- Smi::kMinValue, static_cast<intptr_t>(Smi::kMinValue) - 1
+ Smi::kMaxValue, static_cast<int64_t>(Smi::kMaxValue) + 1,
+ Smi::kMinValue, static_cast<int64_t>(Smi::kMinValue) - 1
};
int test_number_count = 15;
for (int i = 0; i < test_number_count; i++) {
- intptr_t number = test_numbers[i];
+ int64_t number = test_numbers[i];
bool is_valid = Smi::IsValid(number);
bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
CHECK_EQ(is_in_range, is_valid);
@@ -108,8 +108,8 @@
Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
CHECK_EQ(smi_from_int, smi_from_intptr);
}
- int smi_value = smi_from_intptr->value();
- CHECK_EQ(number, static_cast<intptr_t>(smi_value));
+ int64_t smi_value = smi_from_intptr->value();
+ CHECK_EQ(number, smi_value);
}
}
}
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index 6aa0730..c72c4d1 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -58,6 +58,16 @@
using namespace v8::internal;
+static bool CheckParse(const char* input) {
+ V8::Initialize(NULL);
+ v8::HandleScope scope;
+ ZoneScope zone_scope(DELETE_ON_EXIT);
+ FlatStringReader reader(CStrVector(input));
+ RegExpCompileData result;
+ return v8::internal::ParseRegExp(&reader, false, &result);
+}
+
+
static SmartPointer<const char> Parse(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
@@ -106,7 +116,7 @@
}
-
+#define CHECK_PARSE_ERROR(input) CHECK(!CheckParse(input))
#define CHECK_PARSE_EQ(input, expected) CHECK_EQ(expected, *Parse(input))
#define CHECK_SIMPLE(input, simple) CHECK_EQ(simple, CheckSimple(input));
#define CHECK_MIN_MAX(input, min, max) \
@@ -117,6 +127,9 @@
TEST(Parser) {
V8::Initialize(NULL);
+
+ CHECK_PARSE_ERROR("?");
+
CHECK_PARSE_EQ("abc", "'abc'");
CHECK_PARSE_EQ("", "%");
CHECK_PARSE_EQ("abc|def", "(| 'abc' 'def')");
@@ -600,6 +613,34 @@
}
}
+// Test of debug-only syntax.
+#ifdef DEBUG
+
+TEST(ParsePossessiveRepetition) {
+ bool old_flag_value = FLAG_regexp_possessive_quantifier;
+
+ // Enable possessive quantifier syntax.
+ FLAG_regexp_possessive_quantifier = true;
+
+ CHECK_PARSE_EQ("a*+", "(# 0 - p 'a')");
+ CHECK_PARSE_EQ("a++", "(# 1 - p 'a')");
+ CHECK_PARSE_EQ("a?+", "(# 0 1 p 'a')");
+ CHECK_PARSE_EQ("a{10,20}+", "(# 10 20 p 'a')");
+ CHECK_PARSE_EQ("za{10,20}+b", "(: 'z' (# 10 20 p 'a') 'b')");
+
+ // Disable possessive quantifier syntax.
+ FLAG_regexp_possessive_quantifier = false;
+
+ CHECK_PARSE_ERROR("a*+");
+ CHECK_PARSE_ERROR("a++");
+ CHECK_PARSE_ERROR("a?+");
+ CHECK_PARSE_ERROR("a{10,20}+");
+ CHECK_PARSE_ERROR("a{10,20}+b");
+
+ FLAG_regexp_possessive_quantifier = old_flag_value;
+}
+
+#endif
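
A possessive quantifier matches as much as possible and never gives anything
back. That is why the flag-guarded cases above parse with a 'p' mode marker -
the dumps read "(# min max mode body)", with '-' meaning no upper bound -
while the same patterns are syntax errors when the flag is off. Standard
JavaScript has no possessive syntax, but because ECMAScript lookaheads are
atomic, the classic capturing-lookahead trick illustrates the behavior (an
illustration only, not how V8 implements the flag):

    var possessive = /^(?=(a*))\1b$/;          // behaves like /^a*+b$/
    assertTrue(possessive.test("aaab"));       // 'a*' stops before 'b'
    assertFalse(/^(?=(a*))\1a$/.test("aaa"));  // like /^a*+a$/: the 'a's
                                               // are never given back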
// Tests of interpreter.
@@ -1550,7 +1591,68 @@
}
+TEST(CanonicalizeCharacterSets) {
+ ZoneScope scope(DELETE_ON_EXIT);
+ ZoneList<CharacterRange>* list = new ZoneList<CharacterRange>(4);
+ CharacterSet set(list);
+
+ list->Add(CharacterRange(10, 20));
+ list->Add(CharacterRange(30, 40));
+ list->Add(CharacterRange(50, 60));
+ set.Canonicalize();
+ ASSERT_EQ(3, list->length());
+ ASSERT_EQ(10, list->at(0).from());
+ ASSERT_EQ(20, list->at(0).to());
+ ASSERT_EQ(30, list->at(1).from());
+ ASSERT_EQ(40, list->at(1).to());
+ ASSERT_EQ(50, list->at(2).from());
+ ASSERT_EQ(60, list->at(2).to());
+
+ list->Rewind(0);
+ list->Add(CharacterRange(10, 20));
+ list->Add(CharacterRange(50, 60));
+ list->Add(CharacterRange(30, 40));
+ set.Canonicalize();
+ ASSERT_EQ(3, list->length());
+ ASSERT_EQ(10, list->at(0).from());
+ ASSERT_EQ(20, list->at(0).to());
+ ASSERT_EQ(30, list->at(1).from());
+ ASSERT_EQ(40, list->at(1).to());
+ ASSERT_EQ(50, list->at(2).from());
+ ASSERT_EQ(60, list->at(2).to());
+
+ list->Rewind(0);
+ list->Add(CharacterRange(30, 40));
+ list->Add(CharacterRange(10, 20));
+ list->Add(CharacterRange(25, 25));
+ list->Add(CharacterRange(100, 100));
+ list->Add(CharacterRange(1, 1));
+ set.Canonicalize();
+ ASSERT_EQ(5, list->length());
+ ASSERT_EQ(1, list->at(0).from());
+ ASSERT_EQ(1, list->at(0).to());
+ ASSERT_EQ(10, list->at(1).from());
+ ASSERT_EQ(20, list->at(1).to());
+ ASSERT_EQ(25, list->at(2).from());
+ ASSERT_EQ(25, list->at(2).to());
+ ASSERT_EQ(30, list->at(3).from());
+ ASSERT_EQ(40, list->at(3).to());
+ ASSERT_EQ(100, list->at(4).from());
+ ASSERT_EQ(100, list->at(4).to());
+
+ list->Rewind(0);
+ list->Add(CharacterRange(10, 19));
+ list->Add(CharacterRange(21, 30));
+ list->Add(CharacterRange(20, 20));
+ set.Canonicalize();
+ ASSERT_EQ(1, list->length());
+ ASSERT_EQ(10, list->at(0).from());
+ ASSERT_EQ(30, list->at(0).to());
+}
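
The canonical form these assertions rely on can be read off the expected
results: ranges sorted by their lower bound, with overlapping or directly
adjacent ranges merged (note the last case, where [10,19], [20,20] and
[21,30] collapse into [10,30]). A sketch of that rule, in JavaScript for
brevity - inferred from the expected values above, not taken from the
implementation:

    function canonicalize(ranges) {
      ranges.sort(function(a, b) { return a.from - b.from; });
      var out = [];
      for (var i = 0; i < ranges.length; i++) {
        var last = out[out.length - 1];
        if (last !== undefined && ranges[i].from <= last.to + 1) {
          last.to = Math.max(last.to, ranges[i].to);  // merge into previous
        } else {
          out.push({from: ranges[i].from, to: ranges[i].to});
        }
      }
      return out;
    }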
+
+
+
TEST(Graph) {
V8::Initialize(NULL);
- Execute("(?:(?:x(.))?\1)+$", false, true, true);
+ Execute("\\b\\w+\\b", false, true, true);
}
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 8f4441a..6d07426 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,6 +37,8 @@
#include "scopeinfo.h"
#include "snapshot.h"
#include "cctest.h"
+#include "spaces.h"
+#include "objects.h"
using namespace v8::internal;
@@ -115,10 +117,6 @@
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
encoder.Encode(keyed_load_function_prototype.address()));
- ExternalReference passed_function =
- ExternalReference::builtin_passed_function();
- CHECK_EQ(make_code(UNCLASSIFIED, 1),
- encoder.Encode(passed_function.address()));
ExternalReference the_hole_value_location =
ExternalReference::the_hole_value_location();
CHECK_EQ(make_code(UNCLASSIFIED, 2),
@@ -158,8 +156,6 @@
decoder.Decode(
make_code(STATS_COUNTER,
Counters::k_keyed_load_function_prototype)));
- CHECK_EQ(ExternalReference::builtin_passed_function().address(),
- decoder.Decode(make_code(UNCLASSIFIED, 1)));
CHECK_EQ(ExternalReference::the_hole_value_location().address(),
decoder.Decode(make_code(UNCLASSIFIED, 2)));
CHECK_EQ(ExternalReference::address_of_stack_limit().address(),
@@ -277,6 +273,234 @@
}
+class FileByteSink : public SnapshotByteSink {
+ public:
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ file_name_ = snapshot_file;
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
+ virtual int Position() {
+ return ftell(fp_);
+ }
+ void WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used);
+
+ private:
+ FILE* fp_;
+ const char* file_name_;
+};
+
+
+void FileByteSink::WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used) {
+ int file_name_length = strlen(file_name_) + 10;
+ Vector<char> name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(name, "%s.size", file_name_);
+ FILE* fp = OS::FOpen(name.start(), "w");
+ fprintf(fp, "new %d\n", new_space_used);
+ fprintf(fp, "pointer %d\n", pointer_space_used);
+ fprintf(fp, "data %d\n", data_space_used);
+ fprintf(fp, "code %d\n", code_space_used);
+ fprintf(fp, "map %d\n", map_space_used);
+ fprintf(fp, "cell %d\n", cell_space_used);
+ fprintf(fp, "large %d\n", large_space_used);
+ fclose(fp);
+}
+
+
+TEST(PartialSerialization) {
+ Serializer::Enable();
+ v8::V8::Initialize();
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ v8::HandleScope handle_scope;
+ v8::Local<v8::String> foo = v8::String::New("foo");
+
+ FileByteSink file(FLAG_testing_serialization_file);
+ Serializer ser(&file);
+ i::Handle<i::String> internal_foo = v8::Utils::OpenHandle(*foo);
+ Object* raw_foo = *internal_foo;
+ ser.SerializePartial(&raw_foo);
+ file.WriteSpaceUsed(ser.CurrentAllocationAddress(NEW_SPACE),
+ ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ ser.CurrentAllocationAddress(CODE_SPACE),
+ ser.CurrentAllocationAddress(MAP_SPACE),
+ ser.CurrentAllocationAddress(CELL_SPACE),
+ ser.CurrentAllocationAddress(LO_SPACE));
+}
+
+
+DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+ v8::V8::Initialize();
+ const char* file_name = FLAG_testing_serialization_file;
+ int file_name_length = strlen(file_name) + 10;
+ Vector<char> name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(name, "%s.size", file_name);
+ FILE* fp = OS::FOpen(name.start(), "r");
+ int new_size, pointer_size, data_size, code_size, map_size, cell_size;
+ int large_size;
+#ifdef _MSC_VER
+ // Avoid warning about unsafe fscanf from MSVC.
+ // Please note that this is only fine if %c and %s are not being used.
+#define fscanf fscanf_s
+#endif
+ CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
+ CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
+ CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
+ CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
+ CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
+ CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
+ CHECK_EQ(1, fscanf(fp, "large %d\n", &large_size));
+#ifdef _MSC_VER
+#undef fscanf
+#endif
+ fclose(fp);
+ Heap::ReserveSpace(new_size,
+ pointer_size,
+ data_size,
+ code_size,
+ map_size,
+ cell_size,
+ large_size);
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ Object* root;
+ deserializer.DeserializePartial(&root);
+ CHECK(root->IsString());
+}
+
+
+TEST(LinearAllocation) {
+ v8::V8::Initialize();
+ int new_space_max = 512 * KB;
+ for (int size = 1000; size < 5 * MB; size += size >> 1) {
+ int new_space_size = (size < new_space_max) ? size : new_space_max;
+ Heap::ReserveSpace(
+ new_space_size,
+ size, // Old pointer space.
+ size, // Old data space.
+ size, // Code space.
+ size, // Map space.
+ size, // Cell space.
+ size); // Large object space.
+ LinearAllocationScope linear_allocation_scope;
+ const int kSmallFixedArrayLength = 4;
+ const int kSmallFixedArraySize =
+ FixedArray::kHeaderSize + kSmallFixedArrayLength * kPointerSize;
+ const int kSmallStringLength = 16;
+ const int kSmallStringSize =
+ SeqAsciiString::kHeaderSize + kSmallStringLength;
+ const int kMapSize = Map::kSize;
+
+ Object* new_last = NULL;
+ for (int i = 0;
+ i + kSmallFixedArraySize <= new_space_size;
+ i += kSmallFixedArraySize) {
+ Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength);
+ if (new_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
+ }
+ new_last = obj;
+ }
+
+ Object* pointer_last = NULL;
+ for (int i = 0;
+ i + kSmallFixedArraySize <= size;
+ i += kSmallFixedArraySize) {
+ Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength, TENURED);
+ int old_page_fullness = i % Page::kPageSize;
+ int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
+ if (page_fullness < old_page_fullness ||
+ page_fullness > Page::kObjectAreaSize) {
+ i = RoundUp(i, Page::kPageSize);
+ pointer_last = NULL;
+ }
+ if (pointer_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(pointer_last) + kSmallFixedArraySize);
+ }
+ pointer_last = obj;
+ }
+
+ Object* data_last = NULL;
+ for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
+ Object* obj = Heap::AllocateRawAsciiString(kSmallStringLength, TENURED);
+ int old_page_fullness = i % Page::kPageSize;
+ int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
+ if (page_fullness < old_page_fullness ||
+ page_fullness > Page::kObjectAreaSize) {
+ i = RoundUp(i, Page::kPageSize);
+ data_last = NULL;
+ }
+ if (data_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(data_last) + kSmallStringSize);
+ }
+ data_last = obj;
+ }
+
+ Object* map_last = NULL;
+ for (int i = 0; i + kMapSize <= size; i += kMapSize) {
+ Object* obj = Heap::AllocateMap(JS_OBJECT_TYPE, 42 * kPointerSize);
+ int old_page_fullness = i % Page::kPageSize;
+ int page_fullness = (i + kMapSize) % Page::kPageSize;
+ if (page_fullness < old_page_fullness ||
+ page_fullness > Page::kObjectAreaSize) {
+ i = RoundUp(i, Page::kPageSize);
+ map_last = NULL;
+ }
+ if (map_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(map_last) + kMapSize);
+ }
+ map_last = obj;
+ }
+
+ if (size > Page::kObjectAreaSize) {
+ // Support for reserving space in large object space is not there yet,
+ // but using an always-allocate scope is fine for now.
+ AlwaysAllocateScope always;
+ int large_object_array_length =
+ (size - FixedArray::kHeaderSize) / kPointerSize;
+ Object* obj = Heap::AllocateFixedArray(large_object_array_length,
+ TENURED);
+ CHECK(!obj->IsFailure());
+ }
+ }
+}
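
Each loop above advances a running offset by the object size and restarts its
back-to-back check whenever the allocation would have crossed a page
boundary, since a paged space then moves on to a fresh page. The check,
isolated as a sketch (JavaScript for brevity; the names are hypothetical):

    function startsNewPage(offset, size, pageSize, objectAreaSize) {
      var before = offset % pageSize;
      var after = (offset + size) % pageSize;
      // Wrapping around (after < before) or spilling past the usable
      // object area both mean the object went onto a new page.
      return after < before || after > objectAreaSize;
    }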
+
+
TEST(TestThatAlwaysSucceeds) {
}
diff --git a/test/es5conform/README b/test/es5conform/README
index a88f4a3..9cfc92b 100644
--- a/test/es5conform/README
+++ b/test/es5conform/README
@@ -4,7 +4,7 @@
https://es5conform.svn.codeplex.com/svn
-in revision 59101 as 'data' in this directory. Using later version
+in revision 62998 as 'data' in this directory. Using a later version
may be possible but the tests are only known to pass (and indeed run)
with that revision.
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index 49cffb2..a755016 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -38,9 +38,6 @@
chapter14: UNIMPLEMENTED
chapter15/15.1: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.2: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.3: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.4: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.6: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.7: UNIMPLEMENTED
@@ -51,6 +48,210 @@
chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.13: UNIMPLEMENTED
+# Object.getPrototypeOf
+chapter15/15.2/15.2.3/15.2.3.2: PASS
+
+# Object.getOwnPropertyDescriptor
+chapter15/15.2/15.2.3/15.2.3.3: PASS
+
+# NOT IMPLEMENTED: defineProperty
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-3: FAIL_OK
+
+# NOT IMPLEMENTED: getOwnPropertyNames
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-16: FAIL_OK
+
+# NOT IMPLEMENTED: defineProperty
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-18: FAIL_OK
+
+# NOT IMPLEMENTED: defineProperties
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-19: FAIL_OK
+
+# NOT IMPLEMENTED: seal
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: FAIL_OK
+
+# NOT IMPLEMENTED: freeze
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-21: FAIL_OK
+
+# NOT IMPLEMENTED: preventExtensions
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-22: FAIL_OK
+
+# NOT IMPLEMENTED: isSealed
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-23: FAIL_OK
+
+# NOT IMPLEMENTED: isFrozen
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-24: FAIL_OK
+
+# NOT IMPLEMENTED: isExtensible
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-25: FAIL_OK
+
+# NOT IMPLEMENTED: bind
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: FAIL_OK
+
+# Built-ins have wrong descriptor (should all be false)
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-182: FAIL_OK
+
+# Our Function object has a "arguments" property which is used as a non
+# property in in the test
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
+
+
+# Our Function object has a "caller" property which is used as a non
+# property in in the test
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-184: FAIL_OK
+
+# Built-ins have wrong descriptor (should all be false)
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-185: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-186: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-187: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-188: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-189: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-190: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-191: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-192: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-193: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-194: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-195: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-201: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-210: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-211: FAIL_OK
+
+
+# NOT IMPLEMENTED: RegExp.prototype.source
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-212: FAIL_OK
+
+# NOT IMPLEMENTED: RegExp.prototype.global
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-213: FAIL_OK
+
+# NOT IMPLEMENTED: RegExp.prototype.ignoreCase
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-214: FAIL_OK
+
+# NOT IMPLEMENTED: RegExp.prototype.multiline
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-215: FAIL_OK
+
+# Errors have wrong descriptor (should all be false)
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-216: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-217: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-218: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-219: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-220: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-221: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-222: FAIL_OK
+
+# Object.getOwnPropertyNames
+chapter15/15.2/15.2.3/15.2.3.4: PASS
+
+# All of the tests below marked SUBSETFAIL (in 15.2.3.4) fail because
+# the tests assume that objects cannot have more properties
+# than those described in the spec - but according to the spec they can
+# have additional properties.
+# All compareArray calls in these tests could be exchanged for an
+# isSubsetOfArray call, sketched below (a patch will be uploaded to the
+# es5conform site).
+
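A helper of the kind the comment proposes could look like this (the name
comes from the comment above; the body is an assumed sketch):

    function isSubsetOfArray(subset, array) {
      // Passes if every element of 'subset' occurs somewhere in 'array',
      // so extra implementation-defined properties are tolerated.
      for (var i = 0; i < subset.length; i++) {
        if (array.indexOf(subset[i]) == -1) return false;
      }
      return true;
    }
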
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-1: FAIL_OK
+
+# SUBSETFAIL + we do not implement all methods on Object
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-2: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-3: FAIL_OK
+
+# SUBSETFAIL + we do not implement Function.prototype.bind
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-4: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-5: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-6: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-7: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-8: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-9: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-11: FAIL_OK
+
+# We do not implement all methods on RegExp
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-13: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-14: FAIL_OK
+
+# EvalError.prototype does not have message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-15: FAIL_OK
+
+# RangeError.prototype does not have message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-16: FAIL_OK
+
+# ReferenceError.prototype does not have message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-17: FAIL_OK
+
+# SyntaxError.prototype does not have message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-18: FAIL_OK
+
+# TypeError.prototype does not have message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-19: FAIL_OK
+
+# URIError.prototype does not have message property
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-20: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-22: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-23: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-24: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-25: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-26: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-27: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-28: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-29: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-30: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-31: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-32: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-33: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-34: FAIL_OK
+
+# SUBSETFAIL
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-35: FAIL_OK
+
+# getOwnPropertyDescriptor not implemented on array indices
+chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-b-1: FAIL_OK
+
+
+
+
# Object.keys
chapter15/15.2/15.2.3/15.2.3.14: PASS
@@ -59,7 +260,74 @@
chapter15/15.2/15.2.3/15.2.3.14/15.2.3.14-3-3: FAIL_OK
chapter15/15.3: UNIMPLEMENTED
-chapter15/15.4: UNIMPLEMENTED
+
+chapter15/15.4/15.4.4/15.4.4.14: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.15: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.20: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.21: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.22: UNIMPLEMENTED
+
+# Array.prototype.every
+chapter15/15.4/15.4.4/15.4.4.16: PASS
+
+# Wrong test - because no thisArg is given to arr.every,
+# this._15_4_4_16_5_1 evaluates to undefined
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1: FAIL_OK
+
+# In the test case the element is not appended - it is added in the middle
+# of the array
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-7-1: FAIL_OK
+
+# We fail because the test assumes that if the reference to the array is
+# deleted it is no longer traversed
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-7-7: FAIL_OK
+
+# if (val>1) in test should be if (val>2)
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-8-10: FAIL_OK
+
+
+# Array.prototype.some
+chapter15/15.4/15.4.4/15.4.4.17: PASS
+
+# Wrong assumption - according to the spec, some returns a Boolean,
+# not a number
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-4-9: FAIL_OK
+
+# Same as 15.4.4.16-5-1
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-5-1: FAIL_OK
+
+# Same as 15.4.4.16-7-1
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-7-1: FAIL_OK
+
+# Same as 15.4.4.16-7-7
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-7-7: FAIL_OK
+
+# Same as 15.4.4.16-8-10
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-8-10: FAIL_OK
+
+
+# Array.prototype.forEach
+chapter15/15.4/15.4.4/15.4.4.18: PASS
+
+# Same as 15.4.4.16-5-1
+chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1: FAIL_OK
+
+# Same as 15.4.4.16-7-7
+chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-7-6: FAIL_OK
+
+
+# Array.prototype.map
+chapter15/15.4/15.4.4/15.4.4.19: PASS
+
+# Same as 15.4.4.16-5-1
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1: FAIL_OK
+
+# Same as 15.4.4.16-7-7
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-7: FAIL_OK
+
+# Uses an array index number as a property
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-c-iii-1: FAIL_OK
+
+
chapter15/15.5: UNIMPLEMENTED
chapter15/15.6: UNIMPLEMENTED
chapter15/15.7: UNIMPLEMENTED
diff --git a/test/mjsunit/bit-not.js b/test/mjsunit/bit-not.js
new file mode 100644
index 0000000..85eccc4
--- /dev/null
+++ b/test/mjsunit/bit-not.js
@@ -0,0 +1,75 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function testBitNot(x, name) {
+ // The VM constant folds so we use that to check the result.
+ var expected = eval("~(" + x + ")");
+ var actual = ~x;
+ assertEquals(expected, actual, "x: " + name);
+
+ // Test the path where we can overwrite the result. Use -
+ // to avoid concatenating strings.
+ expected = eval("~(" + x + " - 0.01)");
+ actual = ~(x - 0.01);
+ assertEquals(expected, actual, "x - 0.01: " + name);
+}
+
+
+testBitNot(0, 0);
+testBitNot(1, 1);
+testBitNot(-1, 1);
+testBitNot(100, 100);
+testBitNot(0x40000000, "0x40000000");
+testBitNot(0x7fffffff, "0x7fffffff");
+testBitNot(0x80000000, "0x80000000");
+
+testBitNot(2.2, 2.2);
+testBitNot(-2.3, -2.3);
+testBitNot(Infinity, "Infinity");
+testBitNot(NaN, "NaN");
+testBitNot(-Infinity, "-Infinity");
+testBitNot(0x40000000 + 0.12345, "float1");
+testBitNot(0x40000000 - 0.12345, "float2");
+testBitNot(0x7fffffff + 0.12345, "float3");
+testBitNot(0x7fffffff - 0.12345, "float4");
+testBitNot(0x80000000 + 0.12345, "float5");
+testBitNot(0x80000000 - 0.12345, "float6");
+
+testBitNot("0", "string0");
+testBitNot("2.3", "string2.3");
+testBitNot("-9.4", "string-9.4");
+
+
+// Try to test that we can deal with allocation failures in
+// the fast path and just use the slow path instead.
+function TryToGC() {
+ var x = 0x40000000;
+ for (var i = 0; i < 1000000; i++) {
+ assertEquals(~0x40000000, ~x);
+ }
+}
+TryToGC();
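
As background for the assertions above: ~ applies ToInt32 to its operand and
inverts every bit, so ~x === -(x + 1) on the converted value, and non-numbers
are converted first. A few fixed points of that identity:

    assertEquals(-1, ~0);                    // -(0 + 1)
    assertEquals(-0x80000000, ~0x7fffffff);  // holds at the int32 boundary
    assertEquals(-1, ~NaN);                  // ToInt32(NaN) == 0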
diff --git a/test/mjsunit/bitwise-operations-undefined.js b/test/mjsunit/bitwise-operations-undefined.js
new file mode 100644
index 0000000..716e52d
--- /dev/null
+++ b/test/mjsunit/bitwise-operations-undefined.js
@@ -0,0 +1,49 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test bitwise operations with undefined.
+
+function testUndefinedLeftHandSide() {
+ assertEquals(undefined | 1, 1);
+ assertEquals(undefined & 1, 0);
+ assertEquals(undefined ^ 1, 1);
+ assertEquals(undefined << 1, 0);
+ assertEquals(undefined >> 1, 0);
+ assertEquals(undefined >>> 1, 0);
+}
+
+function testUndefinedRightHandSide() {
+ assertEquals(1 | undefined, 1);
+ assertEquals(1 & undefined, 0);
+ assertEquals(1 ^ undefined, 1);
+ assertEquals(1 << undefined, 1);
+ assertEquals(1 >> undefined, 1);
+ assertEquals(1 >>> undefined, 1);
+}
+
+testUndefinedLeftHandSide();
+testUndefinedRightHandSide();
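
Why these hold (ES3 semantics): the bitwise operators apply ToInt32 or
ToUint32 to both operands, and ToInt32(undefined) goes through NaN to 0;
shift counts are additionally masked by 31, so an undefined count shifts by
zero. A few consequences worth spelling out:

    assertEquals(0, undefined | 0);   // ToInt32(undefined) == 0
    assertEquals(1, 1 << 32);         // count masked: 32 & 31 == 0
    assertEquals(1, 1 << undefined);  // so an undefined count is a no-op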
diff --git a/test/mjsunit/compare-character.js b/test/mjsunit/compare-character.js
new file mode 100644
index 0000000..cabe013
--- /dev/null
+++ b/test/mjsunit/compare-character.js
@@ -0,0 +1,50 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test the optimized implementation of comparison with single-character
+// strings.
+
+var a = ['', String.fromCharCode(0), ' ', 'e', 'erik', 'f', 'foo', 'g', 'goo',
+ -1, 0, 1, 1.2, -7.9, true, false, 'foo', '0', 'NaN' ];
+for (var i in a) {
+ var x = a[i];
+ var f = 'f';
+
+ assertEquals(x == f, x == 'f', "==" + x);
+ assertEquals(x === f, x === 'f', "===" + x);
+ assertEquals(x < f, x < 'f', "<" + x);
+ assertEquals(x <= f, x <= 'f', "<=" + x);
+ assertEquals(x > f, x > 'f', ">" + x);
+ assertEquals(x >= f, x >= 'f', ">=" + x);
+ assertEquals(f == x, 'f' == x, "==r" + x);
+ assertEquals(f === x, 'f' === x, "===r" + x);
+ assertEquals(f > x, 'f' > x, "<r" + x);
+ assertEquals(f >= x, 'f' >= x, "<=r" + x);
+ assertEquals(f < x, 'f' < x, ">r" + x);
+ assertEquals(f <= x, 'f' <= x, ">=r" + x);
+}
+
diff --git a/test/mjsunit/compare-nan.js b/test/mjsunit/compare-nan.js
index fc40acc..c4f7817 100644
--- a/test/mjsunit/compare-nan.js
+++ b/test/mjsunit/compare-nan.js
@@ -42,3 +42,25 @@
assertFalse(x <= NaN, "" + x + " <= NaN");
assertFalse(x >= NaN, "" + x + " >= NaN");
}
+
+var b = ["NaN", "-1", "0", "1", "1.2", "-7.9", "true", "false", "'foo'", "'0'",
+ "'NaN'" ];
+for (var i in b) {
+ var x = b[i];
+ var program =
+ "assertFalse(NaN == " + x + ", 'NaN == ' + " + x + ");\n" +
+ "assertFalse(NaN === " + x + ", 'NaN === ' + " + x + ");\n" +
+ "assertFalse(NaN < " + x + ", 'NaN < ' + " + x + ");\n" +
+ "assertFalse(NaN > " + x + ", 'NaN > ' + " + x + ");\n" +
+ "assertFalse(NaN <= " + x + ", 'NaN <= ' + " + x + ");\n" +
+ "assertFalse(NaN >= " + x + ", 'NaN >= ' + " + x + ");\n" +
+
+ "assertFalse(" + x + " == NaN, '' + " + x + " + ' == NaN');\n" +
+ "assertFalse(" + x + " === NaN, '' + " + x + " + ' === NaN');\n" +
+ "assertFalse(" + x + " < NaN, '' + " + x + " + ' < NaN');\n" +
+ "assertFalse(" + x + " > NaN, '' + " + x + " + ' > NaN');\n" +
+ "assertFalse(" + x + " <= NaN, '' + " + x + " + ' <= NaN');\n" +
+ "assertFalse(" + x + " >= NaN, '' + " + x + " + ' >= NaN');\n";
+ eval(program);
+}
+
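All of the generated programs hold because NaN is unordered and compares
unequal to every value, itself included; the only comparison operators that
yield true with a NaN operand are the negated ones:

    assertTrue(NaN != NaN);
    assertTrue(NaN !== NaN);
    assertFalse(NaN == NaN);
    assertFalse(NaN <= NaN);
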
diff --git a/test/mjsunit/compiler/countoperation.js b/test/mjsunit/compiler/countoperation.js
new file mode 100644
index 0000000..5660cee
--- /dev/null
+++ b/test/mjsunit/compiler/countoperation.js
@@ -0,0 +1,111 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test pre- and postfix count operations.
+
+// Test value context.
+var a = 42;
+var b = {x:42};
+var c = "x";
+assertEquals(43, ++a);
+assertEquals(43, a);
+assertEquals(43, a++);
+assertEquals(44, a);
+assertEquals(43, ++b.x);
+assertEquals(43, b.x);
+assertEquals(43, b.x++);
+assertEquals(44, b.x);
+assertEquals(45, ++b[c]);
+assertEquals(45, b[c]);
+assertEquals(45, b[c]++);
+assertEquals(46, b[c]);
+
+// Test effect context.
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(1, eval("++a; 1"));
+assertEquals(43, a);
+assertEquals(1, eval("a++; 1"));
+assertEquals(44, a);
+assertEquals(1, eval("++b.x; 1"));
+assertEquals(43, b.x);
+assertEquals(1, eval("b.x++; 1"));
+assertEquals(44, b.x);
+assertEquals(1, eval("++b[c]; 1"));
+assertEquals(45, b[c]);
+assertEquals(1, eval("b[c]++; 1"));
+assertEquals(46, b[c]);
+
+// Test test context.
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(1, (++a) ? 1 : 0);
+assertEquals(43, a);
+assertEquals(1, (a++) ? 1 : 0);
+assertEquals(44, a);
+assertEquals(1, (++b.x) ? 1 : 0);
+assertEquals(43, b.x);
+assertEquals(1, (b.x++) ? 1 : 0);
+assertEquals(44, b.x);
+assertEquals(1, (++b[c]) ? 1 : 0);
+assertEquals(45, b[c]);
+assertEquals(1, (b[c]++) ? 1 : 0);
+assertEquals(46, b[c]);
+
+// Test value/test and test/value contexts.
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(43, ++a || 1);
+assertEquals(43, a);
+assertEquals(43, a++ || 1);
+assertEquals(44, a);
+assertEquals(43, ++b.x || 1);
+assertEquals(43, b.x);
+assertEquals(43, (b.x++) || 1);
+assertEquals(44, b.x);
+assertEquals(45, ++b[c] || 1);
+assertEquals(45, b[c]);
+assertEquals(45, b[c]++ || 1);
+assertEquals(46, b[c]);
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(1, ++a && 1);
+assertEquals(43, a);
+assertEquals(1, a++ && 1);
+assertEquals(44, a);
+assertEquals(1, ++b.x && 1);
+assertEquals(43, b.x);
+assertEquals(1, (b.x++) && 1);
+assertEquals(44, b.x);
+assertEquals(1, ++b[c] && 1);
+assertEquals(45, b[c]);
+assertEquals(1, b[c]++ && 1);
+assertEquals(46, b[c]);
diff --git a/test/mjsunit/compiler/short-circuit.js b/test/mjsunit/compiler/short-circuit.js
new file mode 100644
index 0000000..42100e7
--- /dev/null
+++ b/test/mjsunit/compiler/short-circuit.js
@@ -0,0 +1,102 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test some expression contexts involving short-circuit boolean
+// operations that did not otherwise have test coverage.
+
+
+var x = 42;
+
+// Literals in value/test context.
+assertEquals(x, function () { return 0 || x }());
+assertEquals(1, function () { return 1 || x }());
+
+// Literals in test/value context.
+assertEquals(0, function () { return 0 && x }());
+assertEquals(x, function () { return 1 && x }());
+
+// A value on top of the stack in value/test context.
+assertEquals(x, function(y) { return y++ || x }(0));
+assertEquals(1, function(y) { return y++ || x }(1));
+
+// A value on top of the stack in a test/value context.
+assertEquals(0, function(y) { return y++ && x }(0));
+assertEquals(x, function(y) { return y++ && x }(1));
+
+// An object literal in value context.
+assertEquals(0, function () { return {x: 0}}().x);
+
+// An object literal in value/test context.
+assertEquals(0, function () { return {x: 0} || this }().x);
+
+// An object literal in test/value context.
+assertEquals(x, function () { return {x: 0} && this }().x);
+
+// An array literal in value/test context.
+assertEquals(0, function () { return [0,1] || new Array(x,1) }()[0]);
+
+// An array literal in test/value context.
+assertEquals(x, function () { return [0,1] && new Array(x,1) }()[0]);
+
+// Slot assignment in value/test context.
+assertEquals(x, function (y) { return (y = 0) || x }("?"));
+assertEquals(1, function (y) { return (y = 1) || x }("?"));
+
+// Slot assignment in test/value context.
+assertEquals(0, function (y) { return (y = 0) && x }("?"));
+assertEquals(x, function (y) { return (y = 1) && x }("?"));
+
+// void in value context.
+assertEquals(void 0, function () { return void x }());
+
+// void in value/test context.
+assertEquals(x, function () { return (void x) || x }());
+
+// void in test/value context.
+assertEquals(void 0, function () { return (void x) && x }());
+
+// Unary not in value context.
+assertEquals(false, function () { return !x }());
+
+// Unary not in value/test context.
+assertEquals(true, function (y) { return !y || x }(0));
+assertEquals(x, function (y) { return !y || x }(1));
+
+// Unary not in test/value context.
+assertEquals(x, function (y) { return !y && x }(0));
+assertEquals(false, function (y) { return !y && x }(1));
+
+// Comparison in value context.
+assertEquals(false, function () { return x < x; }());
+
+// Comparison in value/test context.
+assertEquals(x, function () { return x < x || x; }());
+assertEquals(true, function () { return x <= x || x; }());
+
+// Comparison in test/value context.
+assertEquals(false, function () { return x < x && x; }());
+assertEquals(x, function () { return x <= x && x; }());
diff --git a/test/mjsunit/eval.js b/test/mjsunit/eval.js
index 08bd3d0..95357c7 100644
--- a/test/mjsunit/eval.js
+++ b/test/mjsunit/eval.js
@@ -58,16 +58,16 @@
// Test that un-aliased eval reads from local context.
foo = 0;
-result =
+result =
(function() {
var foo = 2;
return eval('foo');
})();
assertEquals(2, result);
-//Test that un-aliased eval writes to local context.
+// Test that un-aliased eval writes to local context.
foo = 0;
-result =
+result =
(function() {
var foo = 1;
eval('foo = 2');
@@ -84,7 +84,7 @@
// Test that aliased eval reads from global context.
var e = eval;
foo = 0;
-result =
+result =
(function() {
var foo = 2;
return e('foo');
@@ -105,7 +105,7 @@
// Try to cheat the 'aliased eval' detection.
var x = this;
foo = 0;
-result =
+result =
(function() {
var foo = 2;
return x.eval('foo');
@@ -113,7 +113,7 @@
assertEquals(0, result);
foo = 0;
-result =
+result =
(function() {
var eval = function(x) { return x; };
var foo = eval(2);
@@ -128,8 +128,29 @@
})();
assertEquals(4, result);
+result =
+ (function() {
+ eval("var eval = function(s) { return this; }");
+ return eval("42"); // Should return the global object
+ })();
+assertEquals(this, result);
+
+result =
+ (function() {
+ var obj = { f: function(eval) { return eval("this"); } };
+ return obj.f(eval);
+ })();
+assertEquals(this, result);
+
+result =
+ (function() {
+ var obj = { f: function(eval) { arguments; return eval("this"); } };
+ return obj.f(eval);
+ })();
+assertEquals(this, result);
+
eval = function(x) { return 2 * x; };
-result =
+result =
(function() {
return (function() { return eval(2); })();
})();
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index f495c72..d906eb8 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -95,7 +95,11 @@
var knownProblems = {
"Abort": true,
-
+
+ // Avoid calling the concat operation, because weird lengths
+ // may lead to out-of-memory.
+ "StringBuilderConcat": true,
+
// These functions use pseudo-stack-pointers and are not robust
// to unexpected integer values.
"DebugEvaluate": true,
@@ -114,7 +118,7 @@
// the rest of the tests.
"DisableAccessChecks": true,
"EnableAccessChecks": true,
-
+
// These functions should not be callable as runtime functions.
"NewContext": true,
"NewArgumentsFast": true,
@@ -129,7 +133,6 @@
"Log": true,
"DeclareGlobals": true,
- "CollectStackTrace": true,
"PromoteScheduledException": true,
"DeleteHandleScopeExtensions": true
};
diff --git a/test/mjsunit/get-own-property-descriptor.js b/test/mjsunit/get-own-property-descriptor.js
new file mode 100644
index 0000000..79172c8
--- /dev/null
+++ b/test/mjsunit/get-own-property-descriptor.js
@@ -0,0 +1,51 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function get(){return x}
+function set(x){this.x=x};
+
+var obj = {x:1};
+obj.__defineGetter__("accessor", get);
+obj.__defineSetter__("accessor", set);
+
+
+var descIsData = Object.getOwnPropertyDescriptor(obj,'x');
+assertTrue(descIsData.enumerable);
+assertTrue(descIsData.writable);
+assertTrue(descIsData.configurable);
+
+var descIsAccessor = Object.getOwnPropertyDescriptor(obj, 'accessor');
+assertTrue(descIsAccessor.enumerable);
+assertTrue(descIsAccessor.configurable);
+assertTrue(descIsAccessor.get == get);
+assertTrue(descIsAccessor.set == set);
+
+var descIsNotData = Object.getOwnPropertyDescriptor(obj, 'not-x');
+assertTrue(descIsNotData == undefined);
+
+var descIsNotAccessor = Object.getOwnPropertyDescriptor(obj, 'not-accessor');
+assertTrue(descIsNotAccessor == undefined);
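+
For reference (ES5 8.10): a data property descriptor carries value and
writable, an accessor descriptor carries get and set, and both carry
enumerable and configurable, so the two descriptors read above are disjoint
in their type-specific fields:

    assertEquals(1, Object.getOwnPropertyDescriptor(obj, 'x').value);
    assertEquals(undefined, Object.getOwnPropertyDescriptor(obj, 'x').get);
    assertEquals(undefined,
                 Object.getOwnPropertyDescriptor(obj, 'accessor').value);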
diff --git a/test/mjsunit/get-prototype-of.js b/test/mjsunit/get-prototype-of.js
new file mode 100644
index 0000000..6475bde
--- /dev/null
+++ b/test/mjsunit/get-prototype-of.js
@@ -0,0 +1,68 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+function TryGetPrototypeOfNonObject(x) {
+ var caught = 0;
+ try {
+ Object.getPrototypeOf(x);
+ } catch (e) {
+ caught = e;
+ }
+
+ assertTrue(caught instanceof TypeError);
+};
+
+function GetPrototypeOfObject(x) {
+ assertDoesNotThrow(function() { return Object.getPrototypeOf(x); });
+ assertNotNull(Object.getPrototypeOf(x));
+ assertEquals(Object.getPrototypeOf(x), x.__proto__);
+}
+
+function F(){};
+
+// Non object
+var x = 10;
+
+// Object
+var y = new F();
+
+// Make sure that TypeError exceptions are thrown when non-objects are
+// passed as arguments.
+TryGetPrototypeOfNonObject(0);
+TryGetPrototypeOfNonObject(null);
+TryGetPrototypeOfNonObject('Testing');
+TryGetPrototypeOfNonObject(x);
+
+// Make sure the real objects have this method and that it returns the
+// actual prototype object. Also test for Functions and RegExp.
+GetPrototypeOfObject(this);
+GetPrototypeOfObject(y);
+GetPrototypeOfObject({x:5});
+GetPrototypeOfObject(F);
+GetPrototypeOfObject(RegExp);
+
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index bf44f78..35e1634 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -65,9 +65,9 @@
GenericToJSONChecks(String, "x", "y");
// Date toJSON
-assertEquals("1970-01-01T00:00:00Z", new Date(0).toJSON());
-assertEquals("1979-01-11T08:00:00Z", new Date("1979-01-11 08:00 GMT").toJSON());
-assertEquals("2005-05-05T05:05:05Z", new Date("2005-05-05 05:05:05 GMT").toJSON());
+assertEquals("1970-01-01T00:00:00.000Z", new Date(0).toJSON());
+assertEquals("1979-01-11T08:00:00.000Z", new Date("1979-01-11 08:00 GMT").toJSON());
+assertEquals("2005-05-05T05:05:05.000Z", new Date("2005-05-05 05:05:05 GMT").toJSON());
var n1 = new Date(10000);
n1.toISOString = function () { return "foo"; };
assertEquals("foo", n1.toJSON());
diff --git a/test/mjsunit/math-min-max.js b/test/mjsunit/math-min-max.js
index 1a98d44..f9475d6 100644
--- a/test/mjsunit/math-min-max.js
+++ b/test/mjsunit/math-min-max.js
@@ -25,9 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// Test Math.min().
-assertEquals(Number.POSITIVE_INFINITY, Math.min());
+assertEquals(Infinity, Math.min());
assertEquals(1, Math.min(1));
assertEquals(1, Math.min(1, 2));
assertEquals(1, Math.min(2, 1));
@@ -38,14 +40,26 @@
assertEquals(1.1, Math.min(3.3, 2.2, 1.1));
assertEquals(1.1, Math.min(2.2, 3.3, 1.1));
+// Prepare a non-Smi zero value.
+function returnsNonSmi(){ return 0.25; }
+var ZERO = returnsNonSmi() - returnsNonSmi();
+assertEquals(0, ZERO);
+assertEquals(Infinity, 1/ZERO);
+assertEquals(-Infinity, 1/-ZERO);
+assertFalse(%_IsSmi(ZERO));
+assertFalse(%_IsSmi(-ZERO));
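
Background for the ZERO trick: V8 represents small integers as tagged
immediates (Smis), and -0 is not representable as a Smi, so a zero produced
from doubles lives on the heap. Mixing such a heap-number zero with -0
presumably forces Math.min/max off any Smi fast path into code that must
distinguish +0 from -0 - and the sign of a zero is only observable through
division:

    assertEquals(Infinity, 1 / 0);    // +0 and -0 compare equal...
    assertEquals(-Infinity, 1 / -0);  // ...but divide differently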
+
var o = {};
o.valueOf = function() { return 1; };
assertEquals(1, Math.min(2, 3, '1'));
assertEquals(1, Math.min(3, o, 2));
assertEquals(1, Math.min(o, 2));
-assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(-0, +0));
-assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(+0, -0));
-assertEquals(Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY / Math.min(+0, -0, 1));
+assertEquals(-Infinity, Infinity / Math.min(-0, +0));
+assertEquals(-Infinity, Infinity / Math.min(+0, -0));
+assertEquals(-Infinity, Infinity / Math.min(+0, -0, 1));
+assertEquals(-Infinity, Infinity / Math.min(-0, ZERO));
+assertEquals(-Infinity, Infinity / Math.min(ZERO, -0));
+assertEquals(-Infinity, Infinity / Math.min(ZERO, -0, 1));
assertEquals(-1, Math.min(+0, -0, -1));
assertEquals(-1, Math.min(-1, +0, -0));
assertEquals(-1, Math.min(+0, -1, -0));
@@ -73,9 +87,12 @@
assertEquals(3, Math.max(2, '3', 1));
assertEquals(3, Math.max(1, o, 2));
assertEquals(3, Math.max(o, 1));
-assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(-0, +0));
-assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(+0, -0));
-assertEquals(Number.POSITIVE_INFINITY, Number.POSITIVE_INFINITY / Math.max(+0, -0, -1));
+assertEquals(Infinity, Infinity / Math.max(-0, +0));
+assertEquals(Infinity, Infinity / Math.max(+0, -0));
+assertEquals(Infinity, Infinity / Math.max(+0, -0, -1));
+assertEquals(Infinity, Infinity / Math.max(-0, ZERO));
+assertEquals(Infinity, Infinity / Math.max(ZERO, -0));
+assertEquals(Infinity, Infinity / Math.max(ZERO, -0, -1));
assertEquals(1, Math.max(+0, -0, +1));
assertEquals(1, Math.max(+1, +0, -0));
assertEquals(1, Math.max(+0, +1, -0));
@@ -83,3 +100,6 @@
assertNaN(Math.max('oxen'));
assertNaN(Math.max('oxen', 1));
assertNaN(Math.max(1, 'oxen'));
+
+assertEquals(Infinity, 1/Math.max(ZERO, -0));
+assertEquals(Infinity, 1/Math.max(-0, ZERO));
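
Since -0 === +0, the tests tell the two zeros apart by dividing into them: 1/-0 is -Infinity while 1/+0 is +Infinity. The ZERO value produced by returnsNonSmi exercises the heap-number path alongside the Smi path. The underlying idiom, in isolation:

    // -0 and +0 compare equal; only division exposes the sign.
    assertTrue(-0 === +0);
    assertEquals(Infinity, 1 / +0);
    assertEquals(-Infinity, 1 / -0);

    // ES5 15.8.2.12: for Math.min, +0 is considered larger than -0.
    assertEquals(-Infinity, 1 / Math.min(+0, -0));
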
diff --git a/test/mjsunit/mirror-date.js b/test/mjsunit/mirror-date.js
index 6b6a3ad..5c113de 100644
--- a/test/mjsunit/mirror-date.js
+++ b/test/mjsunit/mirror-date.js
@@ -57,7 +57,7 @@
// Test Date values.
testDateMirror(new Date(Date.parse("Dec 25, 1995 1:30 UTC")),
- "1995-12-25T01:30:00Z");
+ "1995-12-25T01:30:00.000Z");
d = new Date();
d.setUTCFullYear(1967);
d.setUTCMonth(0); // January.
@@ -66,10 +66,12 @@
d.setUTCMinutes(22);
d.setUTCSeconds(59);
d.setUTCMilliseconds(0);
-testDateMirror(d, "1967-01-17T09:22:59Z");
+testDateMirror(d, "1967-01-17T09:22:59.000Z");
d.setUTCMilliseconds(1);
-testDateMirror(d, "1967-01-17T09:22:59Z");
+testDateMirror(d, "1967-01-17T09:22:59.001Z");
d.setUTCSeconds(12);
-testDateMirror(d, "1967-01-17T09:22:12Z");
+testDateMirror(d, "1967-01-17T09:22:12.001Z");
d.setUTCSeconds(36);
-testDateMirror(d, "1967-01-17T09:22:36Z");
+testDateMirror(d, "1967-01-17T09:22:36.001Z");
+d.setUTCMilliseconds(136);
+testDateMirror(d, "1967-01-17T09:22:36.136Z");
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 8eb59b7..41388a3 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -45,6 +45,8 @@
# Very slow on ARM, contains no architecture dependent code.
unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
+# Skip long-running test in debug mode.
+regress/regress-524: PASS, SKIP if $mode == debug
[ $arch == arm ]
diff --git a/test/mjsunit/object-create.js b/test/mjsunit/object-create.js
new file mode 100644
index 0000000..d838584
--- /dev/null
+++ b/test/mjsunit/object-create.js
@@ -0,0 +1,250 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test ES5 section 15.2.3.5 Object.create.
+// We do not support non-configurable properties on objects, so that is not
+// tested. We do test getters, setters, writable, enumerable and value.
+
+// Check that no exceptions are thrown.
+Object.create(null);
+Object.create(null, undefined);
+
+// Check that the right exception is thrown.
+try {
+ Object.create(4);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Object or null/.test(e));
+}
+
+try {
+ Object.create("foo");
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Object or null/.test(e));
+}
+
+var ctr = 0;
+var ctr2 = 0;
+var ctr3 = 0;
+var ctr4 = 0;
+var ctr5 = 0;
+var ctr6 = 1000;
+
+var protoFoo = { foo: function() { ctr++; }};
+var fooValue = { foo: { writable: true, value: function() { ctr2++; }}};
+var fooGetter = { foo: { get: function() { return ctr3++; }}};
+var fooSetter = { foo: { set: function() { return ctr4++; }}};
+var fooAmbiguous = { foo: { get: function() { return ctr3++; },
+ value: 3 }};
+
+function valueGet() { ctr5++; return 3; }
+function getterGet() { ctr5++; return function() { return ctr6++; }; }
+
+// Simple object with prototype, no properties added.
+Object.create(protoFoo).foo();
+assertEquals(1, ctr);
+
+// Simple object with object with prototype, no properties added.
+Object.create(Object.create(protoFoo)).foo();
+assertEquals(2, ctr);
+
+// Add a property foo that returns a function.
+var v = Object.create(protoFoo, fooValue);
+v.foo();
+assertEquals(2, ctr);
+assertEquals(1, ctr2);
+
+// Ensure the property is writable.
+v.foo = 42;
+assertEquals(42, v.foo);
+assertEquals(2, ctr);
+assertEquals(1, ctr2);
+
+// Ensure by default properties are not writable.
+v = Object.create(null, { foo: {value: 103}});
+assertEquals(103, v.foo);
+v.foo = 42;
+assertEquals(103, v.foo);
+
+// Add a getter foo that returns a counter value.
+assertEquals(0, Object.create(protoFoo, fooGetter).foo);
+assertEquals(2, ctr);
+assertEquals(1, ctr2);
+assertEquals(1, ctr3);
+
+// Add a setter foo that runs a function.
+assertEquals(1, Object.create(protoFoo, fooSetter).foo = 1);
+assertEquals(2, ctr);
+assertEquals(1, ctr2);
+assertEquals(1, ctr3);
+assertEquals(1, ctr4);
+
+// Make sure that trying to add both a value and a getter
+// will result in an exception.
+try {
+ Object.create(protoFoo, fooAmbiguous);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Invalid property/.test(e));
+}
+assertEquals(2, ctr);
+assertEquals(1, ctr2);
+assertEquals(1, ctr3);
+assertEquals(1, ctr4);
+
+var ctr7 = 0;
+
+var metaProps = {
+ enumerable: { get: function() {
+ assertEquals(0, ctr7++);
+ return true;
+ }},
+ configurable: { get: function() {
+ assertEquals(1, ctr7++);
+ return true;
+ }},
+ value: { get: function() {
+ assertEquals(2, ctr7++);
+ return 4;
+ }},
+ writable: { get: function() {
+ assertEquals(3, ctr7++);
+ return true;
+ }},
+ get: { get: function() {
+ assertEquals(4, ctr7++);
+ return function() { };
+ }},
+ set: { get: function() {
+ assertEquals(5, ctr7++);
+ return function() { };
+ }}
+};
+
+
+// Instead of a plain props object, let's use getters to return its properties.
+var magicValueProps = { foo: Object.create(null, { value: { get: valueGet }})};
+var magicGetterProps = { foo: Object.create(null, { get: { get: getterGet }})};
+var magicAmbiguousProps = { foo: Object.create(null, metaProps) };
+
+assertEquals(3, Object.create(null, magicValueProps).foo);
+assertEquals(1, ctr5);
+
+assertEquals(1000, Object.create(null, magicGetterProps).foo);
+assertEquals(2, ctr5);
+
+// Check that the steps of ToPropertyDescriptor happen in the right order.
+// The exception for an ambiguous properties object must not be thrown
+// before all the values have been read out.
+try {
+ Object.create(null, magicAmbiguousProps);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Invalid property/.test(e));
+ assertEquals(6, ctr7);
+}
+
+var magicWritableProps = {
+ foo: Object.create(null, { value: { value: 4 },
+ writable: { get: function() {
+ ctr6++;
+ return false;
+ }}})};
+
+var fooNotWritable = Object.create(null, magicWritableProps);
+assertEquals(1002, ctr6);
+assertEquals(4, fooNotWritable.foo);
+fooNotWritable.foo = 5;
+assertEquals(4, fooNotWritable.foo);
+
+
+// Test enumerable flag.
+
+var fooNotEnumerable =
+ Object.create({fizz: 14}, {foo: {value: 3, enumerable: false},
+ bar: {value: 4, enumerable: true},
+ baz: {value: 5}});
+var sum = 0;
+for (x in fooNotEnumerable) {
+ assertTrue(x === 'bar' || x === 'fizz');
+ sum += fooNotEnumerable[x];
+}
+assertEquals(18, sum);
+
+
+try {
+ Object.create(null, {foo: { get: 0 }});
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Getter must be a function/.test(e));
+}
+
+try {
+ Object.create(null, {foo: { set: 0 }});
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Setter must be a function/.test(e));
+}
+
+try {
+ Object.create(null, {foo: { set: 0, get: 0 }});
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/Getter must be a function/.test(e));
+}
+
+
+// Ensure that only enumerable own properties on the descriptor are used.
+var tricky = Object.create(
+ { foo: { value: 1, enumerable: true }},
+ { bar: { value: { value: 2, enumerable: true }, enumerable: false },
+ baz: { value: { value: 4, enumerable: false }, enumerable: true },
+ fizz: { value: { value: 8, enumerable: false }, enumerable: false },
+ buzz: { value: { value: 16, enumerable: true }, enumerable: true }});
+
+assertEquals(1, tricky.foo.value);
+assertEquals(2, tricky.bar.value);
+assertEquals(4, tricky.baz.value);
+assertEquals(8, tricky.fizz.value);
+assertEquals(16, tricky.buzz.value);
+
+var sonOfTricky = Object.create(null, tricky);
+
+assertFalse("foo" in sonOfTricky);
+assertFalse("bar" in sonOfTricky);
+assertTrue("baz" in sonOfTricky);
+assertFalse("fizz" in sonOfTricky);
+assertTrue("buzz" in sonOfTricky);
+
+var sum = 0;
+for (x in sonOfTricky) {
+ assertTrue(x === 'buzz');
+ sum += sonOfTricky[x];
+}
+assertEquals(16, sum);
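
As a first approximation, Object.create(proto) builds an empty object whose [[Prototype]] is proto and then defines the descriptor-specified properties on it. A sketch of the prototype half only (descriptor handling elided; approxCreate is a hypothetical helper and does not handle a null prototype):

    function approxCreate(proto) {
      function Temp() {}
      Temp.prototype = proto;
      return new Temp();
    }

    var base = { greet: function() { return 'hi'; } };
    assertEquals('hi', approxCreate(base).greet());
    assertTrue(Object.getPrototypeOf(approxCreate(base)) === base);
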
diff --git a/test/mjsunit/object-get-own-property-names.js b/test/mjsunit/object-get-own-property-names.js
new file mode 100644
index 0000000..f52cee2
--- /dev/null
+++ b/test/mjsunit/object-get-own-property-names.js
@@ -0,0 +1,104 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test ES5 section 15.2.3.4 Object.getOwnPropertyNames.
+
+// Check simple cases.
+var obj = { a: 1, b: 2};
+var propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(2, propertyNames.length);
+assertEquals("a", propertyNames[0]);
+assertEquals("b", propertyNames[1]);
+
+var obj = { a: function(){}, b: function(){} };
+var propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(2, propertyNames.length);
+assertEquals("a", propertyNames[0]);
+assertEquals("b", propertyNames[1]);
+
+// Check the slow case (the object is normalized by the delete).
+var obj = { a: 1, b: 2, c: 3 };
+delete obj.b;
+var propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(2, propertyNames.length);
+assertEquals("a", propertyNames[0]);
+assertEquals("c", propertyNames[1]);
+
+// Check that non-enumerable properties are being returned.
+var propertyNames = Object.getOwnPropertyNames([1, 2]);
+propertyNames.sort();
+assertEquals(3, propertyNames.length);
+assertEquals("0", propertyNames[0]);
+assertEquals("1", propertyNames[1]);
+assertEquals("length", propertyNames[2]);
+
+// Check that no proto properties are returned.
+var obj = { foo: "foo" };
+obj.__proto__ = { bar: "bar" };
+propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(1, propertyNames.length);
+assertEquals("foo", propertyNames[0]);
+
+// Check that getter properties are returned.
+var obj = {};
+obj.__defineGetter__("getter", function() {});
+propertyNames = Object.getOwnPropertyNames(obj);
+propertyNames.sort();
+assertEquals(1, propertyNames.length);
+assertEquals("getter", propertyNames[0]);
+
+try {
+ Object.getOwnPropertyNames(4);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
+
+try {
+ Object.getOwnPropertyNames("foo");
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
+
+try {
+ Object.getOwnPropertyNames(undefined);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
+
+try {
+ Object.getOwnPropertyNames(null);
+ assertTrue(false);
+} catch (e) {
+ assertTrue(/on non-object/.test(e));
+}
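
The contrast the tests draw: for-in visits enumerable properties, including inherited ones, while Object.getOwnPropertyNames reports own properties only, enumerable or not. Side by side, in a sketch using the same __proto__ idiom as the tests:

    var obj = { own: 2 };
    obj.__proto__ = { inherited: 1 };

    var ownNames = Object.getOwnPropertyNames(obj);
    assertEquals(1, ownNames.length);
    assertEquals("own", ownNames[0]);

    var forInNames = [];
    for (var p in obj) forInNames.push(p);
    assertEquals("inherited,own", forInNames.sort().join(","));
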
diff --git a/test/mjsunit/regress/regress-524.js b/test/mjsunit/regress/regress-524.js
new file mode 100644
index 0000000..b37ad8a
--- /dev/null
+++ b/test/mjsunit/regress/regress-524.js
@@ -0,0 +1,32 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test allocation of a large number of maps.
+
+var i = 500000;
+var a = new Array(i);
+// Each object adds and then deletes a property, exercising map allocation.
+for (var j = 0; j < i; j++) { var o = {}; o.x = 42; delete o.x; a[j] = o; }
diff --git a/test/mjsunit/regress/regress-545.js b/test/mjsunit/regress/regress-545.js
new file mode 100644
index 0000000..36cde6d
--- /dev/null
+++ b/test/mjsunit/regress/regress-545.js
@@ -0,0 +1,47 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=545
+// and: http://code.google.com/p/chromium/issues/detail?id=28353
+
+// The "this" variable proxy was reused. If context annotations differ between
+// uses, this can cause a use in a value context to assume a test context. Since
+// it has no true/false labels set, it causes a null-pointer dereference and
+// segmentation fault.
+
+// Code should not crash:
+
+// Original bug report by Robert Swiecki (wrapped to not throw):
+try {
+ new IsPrimitive(load())?this.join():String(' ').charCodeAt((!this>Math));
+} catch (e) {}
+
+// Shorter examples:
+
+this + !this;
+
+this + (this ? 1 : 2);
diff --git a/test/mjsunit/regress/regress-crbug-3184.js b/test/mjsunit/regress/regress-crbug-3184.js
new file mode 100644
index 0000000..ed78183
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-3184.js
@@ -0,0 +1,83 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Object.extend = function (dest, source) {
+ for (property in source) dest[property] = source[property];
+ return dest;
+};
+
+Object.extend ( Function.prototype,
+{
+ wrap : function (wrapper) {
+ var method = this;
+ var bmethod = (function(_method) {
+ return function () {
+ this.$$$parentMethodStore$$$ = this.$proceed;
+ this.$proceed = function() { return _method.apply(this, arguments); };
+ };
+ })(method);
+ var amethod = function () {
+ this.$proceed = this.$$$parentMethodStore$$$;
+ if (this.$proceed == undefined) delete this.$proceed;
+ delete this.$$$parentMethodStore$$$;
+ };
+ var value = function() { bmethod.call(this); retval = wrapper.apply(this, arguments); amethod.call(this); return retval; };
+ return value;
+ }
+});
+
+String.prototype.cap = function() {
+ return this.charAt(0).toUpperCase() + this.substring(1).toLowerCase();
+};
+
+String.prototype.cap = String.prototype.cap.wrap(
+ function(each) {
+ if (each && this.indexOf(" ") != -1) {
+ return this.split(" ").map(
+ function (value) {
+ return value.cap();
+ }
+ ).join(" ");
+ } else {
+ return this.$proceed();
+ }
+});
+
+Object.extend( Array.prototype,
+{
+ map : function(fun) {
+ if (typeof fun != "function") throw new TypeError();
+ var len = this.length;
+ var res = new Array(len);
+ var thisp = arguments[1];
+ for (var i = 0; i < len; i++) { if (i in this) res[i] = fun.call(thisp, this[i], i, this); }
+ return res;
+ }
+});
+assertEquals("Test1 test1", "test1 test1".cap());
+assertEquals("Test2 Test2", "test2 test2".cap(true));
+
diff --git a/test/mjsunit/smi-ops.js b/test/mjsunit/smi-ops.js
index 284050d..39b4894 100644
--- a/test/mjsunit/smi-ops.js
+++ b/test/mjsunit/smi-ops.js
@@ -537,7 +537,7 @@
one = four - three;
zero = one - one;
- // Begin block A repeat 3
+ // Begin block A repeat 3
assertEquals(pos_non_smi, (pos_non_smi) >> zero);
assertEquals(pos_non_smi, (pos_non_smi) >>> zero);
assertEquals(pos_non_smi, (pos_non_smi) << zero);
@@ -638,6 +638,31 @@
testShiftNonSmis();
+function intConversion() {
+ function foo(x) {
+ assertEquals(x, (x * 1.0000000001) | 0, "foo more " + x);
+ assertEquals(x, x | 0, "foo " + x);
+ if (x > 0) {
+ assertEquals(x - 1, (x * 0.9999999999) | 0, "foo less " + x);
+ } else {
+ assertEquals(x + 1, (x * 0.9999999999) | 0, "foo less " + x);
+ }
+ }
+ for (var i = 1; i < 0x80000000; i *= 2) {
+ foo(i);
+ foo(-i);
+ }
+ for (var i = 1; i < 1/0; i *= 2) {
+ assertEquals(i | 0, (i * 1.0000000000000001) | 0, "b" + i);
+ assertEquals(-i | 0, (i * -1.0000000000000001) | 0, "c" + i);
+ }
+ for (var i = 0.5; i > 0; i /= 2) {
+ assertEquals(0, i | 0, "d" + i);
+ assertEquals(0, -i | 0, "e" + i);
+ }
+}
+
+intConversion();
// Verify that we handle the (optimized) corner case of shifting by
// zero even for non-smis.
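
The intConversion test leans on ToInt32 semantics: x | 0 truncates toward zero and wraps modulo 2^32 into the signed 32-bit range. A few data points illustrating both effects:

    assertEquals(3, 3.7 | 0);                    // truncates toward zero
    assertEquals(-3, -3.7 | 0);                  // also toward zero for negatives
    assertEquals(-2147483648, 2147483648 | 0);   // 2^31 wraps to -2^31
    assertEquals(0, 4294967296 | 0);             // 2^32 wraps to 0
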
diff --git a/test/mjsunit/try.js b/test/mjsunit/try.js
index 0bd78b4..794860a 100644
--- a/test/mjsunit/try.js
+++ b/test/mjsunit/try.js
@@ -347,3 +347,48 @@
assertFalse(caught);
assertTrue(finalized);
+function return_from_nested_finally_in_finally() {
+ try {
+ return 1;
+ } finally {
+ try {
+ return 2;
+ } finally {
+ return 42;
+ }
+ }
+}
+
+assertEquals(42, return_from_nested_finally_in_finally());
+
+function break_from_nested_finally_in_finally() {
+ L: try {
+ return 1;
+ } finally {
+ try {
+ return 2;
+ } finally {
+ break L;
+ }
+ }
+ return 42;
+}
+
+assertEquals(42, break_from_nested_finally_in_finally());
+
+function continue_from_nested_finally_in_finally() {
+ do {
+ try {
+ return 1;
+ } finally {
+ try {
+ return 2;
+ } finally {
+ continue;
+ }
+ }
+ } while (false);
+ return 42;
+}
+
+assertEquals(42, continue_from_nested_finally_in_finally());
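
The common thread in all three: an abrupt completion inside a finally block replaces whatever completion was already pending, so the innermost finally has the last word. The single-level case makes the rule plain:

    function f() {
      try { return 'from try'; } finally { return 'from finally'; }
    }
    assertEquals('from finally', f());
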
diff --git a/test/mjsunit/value-wrapper.js b/test/mjsunit/value-wrapper.js
new file mode 100644
index 0000000..33ef013
--- /dev/null
+++ b/test/mjsunit/value-wrapper.js
@@ -0,0 +1,138 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// When calling user-defined functions on strings, booleans or
+// numbers, we should create a wrapper object.
+
+function RunTests() {
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', 'xxx'.TypeOfThis());
+ assertEquals('object', true.TypeOfThis(2,3));
+ assertEquals('object', false.TypeOfThis());
+ assertEquals('object', (42).TypeOfThis());
+ assertEquals('object', (3.14).TypeOfThis());
+ }
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', 'xxx'['TypeOfThis']());
+ assertEquals('object', true['TypeOfThis']());
+ assertEquals('object', false['TypeOfThis']());
+ assertEquals('object', (42)['TypeOfThis']());
+ assertEquals('object', (3.14)['TypeOfThis']());
+ }
+
+ function CallTypeOfThis(obj) {
+ assertEquals('object', obj.TypeOfThis());
+ }
+
+ for (var i = 0; i < 10; i++) {
+ CallTypeOfThis('xxx');
+ CallTypeOfThis(true);
+ CallTypeOfThis(false);
+ CallTypeOfThis(42);
+ CallTypeOfThis(3.14);
+ }
+
+ function TestWithWith(obj) {
+ with (obj) {
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', TypeOfThis());
+ }
+ }
+ }
+
+ TestWithWith('xxx');
+ TestWithWith(true);
+ TestWithWith(false);
+ TestWithWith(42);
+ TestWithWith(3.14);
+
+ for (var i = 0; i < 10; i++) {
+ assertEquals('object', true[7]());
+ assertEquals('object', false[7]());
+ assertEquals('object', (42)[7]());
+ assertEquals('object', (3.14)[7]());
+ }
+}
+
+function TypeOfThis() { return typeof this; }
+
+// Test with normal setup of prototype.
+String.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype.TypeOfThis = TypeOfThis;
+Number.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype[7] = TypeOfThis;
+Number.prototype[7] = TypeOfThis;
+
+
+RunTests();
+
+// Run test after properties have been set to a different value.
+String.prototype.TypeOfThis = 'x';
+Boolean.prototype.TypeOfThis = 'x';
+Number.prototype.TypeOfThis = 'x';
+Boolean.prototype[7] = 'x';
+Number.prototype[7] = 'x';
+
+String.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype.TypeOfThis = TypeOfThis;
+Number.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype[7] = TypeOfThis;
+Number.prototype[7] = TypeOfThis;
+
+RunTests();
+
+// Force the prototype into slow case and run the test again.
+delete String.prototype.TypeOfThis;
+delete Boolean.prototype.TypeOfThis;
+delete Number.prototype.TypeOfThis;
+Boolean.prototype[7];
+Number.prototype[7];
+
+String.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype.TypeOfThis = TypeOfThis;
+Number.prototype.TypeOfThis = TypeOfThis;
+Boolean.prototype[7] = TypeOfThis;
+Number.prototype[7] = TypeOfThis;
+
+RunTests();
+
+// According to ES3 15.3.4.3 the this value passed to Function.prototype.apply
+// should be wrapped. According to ES5 it should not.
+assertEquals('object', TypeOfThis.apply('xxx', []));
+assertEquals('object', TypeOfThis.apply(true, []));
+assertEquals('object', TypeOfThis.apply(false, []));
+assertEquals('object', TypeOfThis.apply(42, []));
+assertEquals('object', TypeOfThis.apply(3.14, []));
+
+// According to ES3 15.3.4.3 the this value passed to Function.prototype.call
+// should be wrapped. According to ES5 it should not.
+assertEquals('object', TypeOfThis.call('xxx'));
+assertEquals('object', TypeOfThis.call(true));
+assertEquals('object', TypeOfThis.call(false));
+assertEquals('object', TypeOfThis.call(42));
+assertEquals('object', TypeOfThis.call(3.14));
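
Concretely, a wrapper object means that inside a method called on a primitive, this is a String, Number or Boolean object (ES3 semantics), so typeof this is 'object' and the primitive is recoverable via valueOf. A small hypothetical example:

    Number.prototype.double = function() {
      // `this` is a Number object here, not the primitive 42.
      return this.valueOf() * 2;
    };
    assertEquals(84, (42).double());
    assertEquals('object', typeof Object(42));  // an explicit wrapper
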
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index ba7224b..4368eb8 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -199,9 +199,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
- # TODO(piman): This assumes that the host is ia32 or amd64. Fixing the
- # code would be better
- ['target_arch=="arm" and _toolset=="host"', {
+ ['target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
@@ -428,9 +426,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
- # TODO(piman): This assumes that the host is ia32 or amd64. Fixing
- # the code would be better
- ['_toolset=="host"', {
+ ['host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
@@ -598,9 +594,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
- # TODO(piman): This assumes that the host is ia32 or amd64. Fixing
- # the code would be better
- ['target_arch=="arm" and _toolset=="host"', {
+ ['target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
diff --git a/tools/jsmin.py b/tools/jsmin.py
index fd1abe4..646bf14 100644
--- a/tools/jsmin.py
+++ b/tools/jsmin.py
@@ -230,7 +230,9 @@
  # A regexp that matches a literal string surrounded by 'single quotes'.
single_quoted_string = r"'(?:[^'\\]|\\.)*'"
# A regexp that matches a regexp literal surrounded by /slashes/.
- slash_quoted_regexp = r"/(?:[^/\\]|\\.)+/"
+ # Don't allow a regexp to have a ) before the first ( since that's a
+ # syntax error and it's probably just two unrelated slashes.
+ slash_quoted_regexp = r"/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
# Replace multiple spaces with a single space.
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
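
To see what the stricter pattern buys (illustrated in JavaScript regexp syntax, though jsmin itself is Python): the old pattern treats the span between two unrelated division operators as a regexp literal whenever no slash intervenes, and a ) before any ( is a cheap tell that the span cannot be a regexp body. A hypothetical misfire:

    var OLD = /\/(?:[^\/\\]|\\.)+\//;
    var line = 'q = f(a / b) / g(c / d);';
    // OLD reads "/ b) /" as one regexp literal spanning two divisions;
    // the new pattern rejects it because ')' appears before any '('.
    assertEquals('/ b) /', OLD.exec(line)[0]);
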
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 3f27c00..04952e0 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -221,7 +221,7 @@
COPYRIGHT_HEADER_PATTERN = re.compile(
- r'Copyright [\d-]*200[8-9] the V8 project authors. All rights reserved.')
+ r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
"""
diff --git a/tools/profile.js b/tools/profile.js
index db4b542..d41f5cd 100644
--- a/tools/profile.js
+++ b/tools/profile.js
@@ -163,6 +163,16 @@
/**
+ * Retrieves a code entry by an address.
+ *
+ * @param {number} addr Entry address.
+ */
+devtools.profiler.Profile.prototype.findEntry = function(addr) {
+ return this.codeMap_.findEntry(addr);
+};
+
+
+/**
* Records a tick event. Stack must contain a sequence of
* addresses starting with the program counter value.
*
@@ -345,6 +355,14 @@
/**
+ * Returns raw node name (without type decoration).
+ */
+devtools.profiler.Profile.DynamicCodeEntry.prototype.getRawName = function() {
+ return this.name;
+};
+
+
+/**
* Constructs a call graph.
*
* @constructor
diff --git a/tools/stats-viewer.py b/tools/stats-viewer.py
index bd6a8fb..14b2147 100755
--- a/tools/stats-viewer.py
+++ b/tools/stats-viewer.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -35,6 +37,7 @@
import mmap
import os
+import re
import struct
import sys
import time
@@ -49,8 +52,9 @@
COUNTER_LABELS = {"t": "%i ms.", "c": "%i"}
-# The magic number used to check if a file is not a counters file
+# The magic numbers used to check if a file is not a counters file
COUNTERS_FILE_MAGIC_NUMBER = 0xDEADFACE
+CHROME_COUNTERS_FILE_MAGIC_NUMBER = 0x13131313
class StatsViewer(object):
@@ -92,17 +96,31 @@
something goes wrong print an informative message and exit the
program."""
if not os.path.exists(self.data_name):
- print "File %s doesn't exist." % self.data_name
- sys.exit(1)
+ maps_name = "/proc/%s/maps" % self.data_name
+ if not os.path.exists(maps_name):
+ print "\"%s\" is neither a counter file nor a PID." % self.data_name
+ sys.exit(1)
+ maps_file = open(maps_name, "r")
+ try:
+ m = re.search(r"/dev/shm/\S*", maps_file.read())
+ if m is not None and os.path.exists(m.group(0)):
+ self.data_name = m.group(0)
+ else:
+ print "Can't find counter file in maps for PID %s." % self.data_name
+ sys.exit(1)
+ finally:
+ maps_file.close()
data_file = open(self.data_name, "r")
size = os.fstat(data_file.fileno()).st_size
fileno = data_file.fileno()
self.shared_mmap = mmap.mmap(fileno, size, access=mmap.ACCESS_READ)
data_access = SharedDataAccess(self.shared_mmap)
- if data_access.IntAt(0) != COUNTERS_FILE_MAGIC_NUMBER:
- print "File %s is not stats data." % self.data_name
- sys.exit(1)
- return CounterCollection(data_access)
+ if data_access.IntAt(0) == COUNTERS_FILE_MAGIC_NUMBER:
+ return CounterCollection(data_access)
+ elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
+ return ChromeCounterCollection(data_access)
+ print "File %s is not stats data." % self.data_name
+ sys.exit(1)
def CleanUp(self):
"""Cleans up the memory mapped file if necessary."""
@@ -356,6 +374,72 @@
return 4 + self.max_name_size
+class ChromeCounter(object):
+ """A pointer to a single counter withing a binary counters file."""
+
+ def __init__(self, data, name_offset, value_offset):
+ """Create a new instance.
+
+ Args:
+ data: the shared data access object containing the counter
+ name_offset: the byte offset of the start of this counter's name
+ value_offset: the byte offset of the start of this counter's value
+ """
+ self.data = data
+ self.name_offset = name_offset
+ self.value_offset = value_offset
+
+ def Value(self):
+ """Return the integer value of this counter."""
+ return self.data.IntAt(self.value_offset)
+
+ def Name(self):
+ """Return the ascii name of this counter."""
+ result = ""
+ index = self.name_offset
+ current = self.data.ByteAt(index)
+ while current:
+ result += chr(current)
+ index += 1
+ current = self.data.ByteAt(index)
+ return result
+
+
+class ChromeCounterCollection(object):
+ """An overlay over a counters file that provides access to the
+ individual counters contained in the file."""
+
+ _HEADER_SIZE = 4 * 4
+ _NAME_SIZE = 32
+
+ def __init__(self, data):
+ """Create a new instance.
+
+ Args:
+ data: the shared data access object
+ """
+ self.data = data
+ self.max_counters = data.IntAt(8)
+ self.max_threads = data.IntAt(12)
+ self.counter_names_offset = \
+ self._HEADER_SIZE + self.max_threads * (self._NAME_SIZE + 2 * 4)
+ self.counter_values_offset = \
+ self.counter_names_offset + self.max_counters * self._NAME_SIZE
+
+ def CountersInUse(self):
+ """Return the number of counters in active use."""
+ for i in xrange(self.max_counters):
+ if self.data.ByteAt(self.counter_names_offset + i * self._NAME_SIZE) == 0:
+ return i
+ return self.max_counters
+
+ def Counter(self, i):
+ """Return the i'th counter."""
+ return ChromeCounter(self.data,
+ self.counter_names_offset + i * self._NAME_SIZE,
+ self.counter_values_offset + i * self.max_threads * 4)
+
+
def Main(data_file):
"""Run the stats counter.
@@ -367,6 +451,6 @@
if __name__ == "__main__":
if len(sys.argv) != 2:
- print "Usage: stats-viewer.py <stats data>"
+ print "Usage: stats-viewer.py <stats data>|<test_shell pid>"
sys.exit(1)
Main(sys.argv[1])
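
The offset arithmetic in ChromeCounterCollection implies the following file layout (reconstructed from the code above, not from a published spec):

    bytes 0..15   header: magic number, plus two ints at offsets 8 and 12
                  giving max_counters and max_threads
    next          max_threads thread records of _NAME_SIZE + 2*4 bytes each
    next          max_counters counter names, _NAME_SIZE bytes each,
                  NUL-terminated (a leading NUL marks the first unused slot)
    next          int32 values; counter i for thread t lives at
                  counter_values_offset + (i * max_threads + t) * 4
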
diff --git a/tools/test.py b/tools/test.py
index 75b4f61..f17e9b1 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -639,10 +639,7 @@
name = name + '.exe'
return name
-def RunTestCases(all_cases, progress, tasks):
- def DoSkip(case):
- return SKIP in c.outcomes or SLOW in c.outcomes
- cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
@@ -1335,13 +1332,16 @@
PrintReport(all_cases)
result = None
- if len(all_cases) == 0:
+ def DoSkip(case):
+ return SKIP in case.outcomes or SLOW in case.outcomes
+ cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
+ if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
- if RunTestCases(all_cases, options.progress, options.j):
+ if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
@@ -1355,7 +1355,7 @@
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
- timed_tests = [ t.case for t in all_cases if not t.case.duration is None ]
+ timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
diff --git a/tools/tickprocessor-driver.js b/tools/tickprocessor-driver.js
index dc67796..4201e43 100644
--- a/tools/tickprocessor-driver.js
+++ b/tools/tickprocessor-driver.js
@@ -44,10 +44,16 @@
};
var params = processArguments(arguments);
+var snapshotLogProcessor;
+if (params.snapshotLogFileName) {
+ snapshotLogProcessor = new SnapshotLogProcessor();
+ snapshotLogProcessor.processLogFile(params.snapshotLogFileName);
+}
var tickProcessor = new TickProcessor(
new (entriesProviders[params.platform])(params.nm),
params.separateIc,
params.ignoreUnknown,
- params.stateFilter);
+ params.stateFilter,
+ snapshotLogProcessor);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
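
The wiring above is a two-pass scheme: a snapshot-time log maps serialization positions to code names, and the main log's snapshot-pos events map runtime addresses back to those positions, so deserialized code regains its original name. The handshake in miniature (names and addresses hypothetical):

    var serializedNames = [];            // built while reading the snapshot log
    serializedNames[7] = 'LazyCompile: foo';

    var deserializedNames = [];          // built while reading the main log
    function onSnapshotPosition(addr, pos) {
      deserializedNames[addr] = serializedNames[pos];
    }
    onSnapshotPosition(0x1234, 7);
    assertEquals('LazyCompile: foo', deserializedNames[0x1234]);
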
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index fd23987..c566c22 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -53,14 +53,79 @@
function inherits(childCtor, parentCtor) {
- function tempCtor() {};
- tempCtor.prototype = parentCtor.prototype;
- childCtor.prototype = new tempCtor();
+ childCtor.prototype.__proto__ = parentCtor.prototype;
+};
+
+
+function SnapshotLogProcessor() {
+ devtools.profiler.LogReader.call(this, {
+ 'code-creation': {
+ parsers: [null, this.createAddressParser('code'), parseInt, null],
+ processor: this.processCodeCreation, backrefs: true },
+ 'code-move': { parsers: [this.createAddressParser('code'),
+ this.createAddressParser('code-move-to')],
+ processor: this.processCodeMove, backrefs: true },
+ 'code-delete': { parsers: [this.createAddressParser('code')],
+ processor: this.processCodeDelete, backrefs: true },
+ 'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
+ processor: this.processSnapshotPosition, backrefs: true }});
+
+ Profile.prototype.handleUnknownCode = function(operation, addr) {
+ var op = devtools.profiler.Profile.Operation;
+ switch (operation) {
+ case op.MOVE:
+ print('Snapshot: Code move event for unknown code: 0x' +
+ addr.toString(16));
+ break;
+ case op.DELETE:
+ print('Snapshot: Code delete event for unknown code: 0x' +
+ addr.toString(16));
+ break;
+ }
+ };
+
+ this.profile_ = new Profile();
+ this.serializedEntries_ = [];
+}
+inherits(SnapshotLogProcessor, devtools.profiler.LogReader);
+
+
+SnapshotLogProcessor.prototype.processCodeCreation = function(
+ type, start, size, name) {
+ var entry = this.profile_.addCode(
+ this.expandAlias(type), name, start, size);
+};
+
+
+SnapshotLogProcessor.prototype.processCodeMove = function(from, to) {
+ this.profile_.moveCode(from, to);
+};
+
+
+SnapshotLogProcessor.prototype.processCodeDelete = function(start) {
+ this.profile_.deleteCode(start);
+};
+
+
+SnapshotLogProcessor.prototype.processSnapshotPosition = function(addr, pos) {
+ this.serializedEntries_[pos] = this.profile_.findEntry(addr);
+};
+
+
+SnapshotLogProcessor.prototype.processLogFile = function(fileName) {
+ var contents = readFile(fileName);
+ this.processLogChunk(contents);
+};
+
+
+SnapshotLogProcessor.prototype.getSerializedEntryName = function(pos) {
+ var entry = this.serializedEntries_[pos];
+ return entry ? entry.getRawName() : null;
};
function TickProcessor(
- cppEntriesProvider, separateIc, ignoreUnknown, stateFilter) {
+ cppEntriesProvider, separateIc, ignoreUnknown, stateFilter, snapshotLogProcessor) {
devtools.profiler.LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
@@ -72,6 +137,8 @@
processor: this.processCodeMove, backrefs: true },
'code-delete': { parsers: [this.createAddressParser('code')],
processor: this.processCodeDelete, backrefs: true },
+ 'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
+ processor: this.processSnapshotPosition, backrefs: true },
'tick': { parsers: [this.createAddressParser('code'),
this.createAddressParser('stack'), parseInt, 'var-args'],
processor: this.processTick, backrefs: true },
@@ -95,6 +162,8 @@
this.cppEntriesProvider_ = cppEntriesProvider;
this.ignoreUnknown_ = ignoreUnknown;
this.stateFilter_ = stateFilter;
+ this.snapshotLogProcessor_ = snapshotLogProcessor;
+ this.deserializedEntriesNames_ = [];
var ticks = this.ticks_ =
{ total: 0, unaccounted: 0, excluded: 0, gc: 0 };
@@ -202,6 +271,7 @@
TickProcessor.prototype.processCodeCreation = function(
type, start, size, name) {
+ name = this.deserializedEntriesNames_[start] || name;
var entry = this.profile_.addCode(
this.expandAlias(type), name, start, size);
};
@@ -217,6 +287,14 @@
};
+TickProcessor.prototype.processSnapshotPosition = function(addr, pos) {
+ if (this.snapshotLogProcessor_) {
+ this.deserializedEntriesNames_[addr] =
+ this.snapshotLogProcessor_.getSerializedEntryName(pos);
+ }
+};
+
+
TickProcessor.prototype.includeTick = function(vmState) {
return this.stateFilter_ == null || this.stateFilter_ == vmState;
};
@@ -648,7 +726,9 @@
'--mac': ['platform', 'mac',
'Specify that we are running on Mac OS X platform'],
'--nm': ['nm', 'nm',
- 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)']
+ 'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+ '--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
+ 'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)']
};
this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
@@ -660,6 +740,7 @@
ArgumentsProcessor.DEFAULTS = {
logFileName: 'v8.log',
+ snapshotLogFileName: null,
platform: 'unix',
stateFilter: null,
ignoreUnknown: false,