Diffs of github.com/ispras/llv8.git and github.com/ispras/llvm-for-v8 from their respective trunks
diff --git a/.gitignore b/.gitignore
index 6afb7f5..82ac8cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@
 *.map
 *.mk
 *.ncb
+*.orig
 *.pdb
 *.pyc
 *.scons*
@@ -13,6 +14,7 @@
 *.sln
 *.so
 *.suo
+*.swp
 *.user
 *.vcproj
 *.vcxproj
@@ -20,9 +22,11 @@
 *.xcodeproj
 #*#
 *~
+.autotools
 .cpplint-cache
 .cproject
 .d8_history
+.gclient
 .gclient_entries
 .landmines
 .project
@@ -78,6 +82,7 @@ shell_g
 /tools/luci-go/win64/isolate.exe
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
+/tools/out
 /tools/swarming_client
 /tools/visual_studio/Debug
 /tools/visual_studio/Release
diff --git a/BUILD.gn b/BUILD.gn
index 1a93a34..759dfa5 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1075,6 +1075,17 @@ source_set("v8_base") {
 "src/lithium-codegen.h",
 "src/lithium.cc",
 "src/lithium.h",
+ "src/llvm/llvm-chunk.cc",
+ "src/llvm/llvm-chunk.h",
+ "src/llvm/llvm-headers.h",
+ "src/llvm/llvm-stackmaps.cc",
+ "src/llvm/llvm-stackmaps.h",
+ "src/llvm/mcjit-memory-manager.cc",
+ "src/llvm/mcjit-memory-manager.h",
+ "src/llvm/pass-normalize-phis.cc",
+ "src/llvm/pass-normalize-phis.h",
+ "src/llvm/pass-rewrite-safepoints.cc",
+ "src/llvm/pass-rewrite-safepoints.h",
 "src/log-inl.h",
 "src/log-utils.cc",
 "src/log-utils.h",
@@ -1082,6 +1093,8 @@ source_set("v8_base") {
 "src/log.h",
 "src/lookup.cc",
 "src/lookup.h",
+ "src/low-chunk.cc",
+ "src/low-chunk.h",
 "src/macro-assembler.h",
 "src/messages.cc",
 "src/messages.h",
diff --git a/README.md b/README.md
index 804df5e..2d91a0b 100644
--- a/README.md
+++ b/README.md
@@ -1,40 +1,197 @@
-V8 JavaScript Engine
+LLV8
 =============
+LLV8 is an experimental top-tier compiler for V8. It leverages the power of LLVM MCJIT to produce highly optimized code. It is supposed to be used as a third tier for cases where it makes sense to spend more time compiling to achieve higher throughput.
-V8 is Google's open source JavaScript engine.
+LLV8 (the backend) is implemented as a patch to V8 and cannot function without the virtual machine. Although LLV8 is only a fraction of the entire patched VM, we also refer to the whole thing (our fork of V8) as LLV8.
-V8 implements ECMAScript as specified in ECMA-262.
+The LLV8 codebase effectively consists of two repositories, both of which are hosted on GitHub:
+ - [LLVM fork](https://github.com/ispras/llvm-for-v8)
+ - [V8 fork (LLV8)](https://github.com/ispras/llv8)
-V8 is written in C++ and is used in Google Chrome, the open source
-browser from Google.
-
-V8 can run standalone, or can be embedded into any C++ application.
-
-V8 Project page: https://code.google.com/p/v8/
-
-
-Getting the Code
+Building LLV8
+=============
+Building instructions can be found in the project's [wiki](https://github.com/ispras/llv8/wiki/Building%20with%20Gyp). They are duplicated in this readme for convenience.
+
+We are going to check out the sources of LLV8 and the modified LLVM. V8 (and thus LLV8) ships with binaries of a clang compiler, so we are going to use it to build both LLVM and LLV8 to avoid linking problems.
+
+### Checking out LLV8
+
+The easiest way is to follow the process of building regular V8, adding the LLV8 branch as a remote.
+
+```
+cd $LLV8_ROOT # Choose any directory you want.
+# E.g. cd /some/dir && export LLV8_ROOT=`pwd`
+```
+
+Install [depot_tools](http://www.chromium.org/developers/how-tos/install-depot-tools):
+```
+git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+export PATH=`pwd`/depot_tools:"$PATH"
+```
+Fetch all the code:
+```
+fetch v8
+cd v8
+git remote add llv8 https://github.com/ispras/llv8.git
+git pull llv8 llv8
+git checkout llv8
+gclient sync
+```
+Note that we don't run make yet, since first we need to build the LLVM libraries to link against.
+We had to check out V8 first to obtain the clang compiler, though.
+
+### LLVM
+
+Check out and build our version of LLVM:
+```
+cd $LLV8_ROOT
+git clone https://github.com/ispras/llvm-for-v8.git
+mkdir build-llvm
+cd build-llvm
+export CC=$LLV8_ROOT/v8/third_party/llvm-build/Release+Asserts/bin/clang
+export CXX=$LLV8_ROOT/v8/third_party/llvm-build/Release+Asserts/bin/clang++
+../llvm-for-v8/configure --enable-assertions --enable-optimized --disable-zlib
+make -j9
+sudo make install # Note: this installs the built version of llvm system-wide.
+```
+In theory you can pass `--prefix` to configure, or skip installing LLVM altogether and use it from the build directory: all you need to build llv8 is the `llvm-config` of this freshly built LLVM in your `$PATH`.
+But this makes the subsequent compilation of llv8 a bit more involved (the C++ compiler emits warnings-as-errors while compiling the LLVM headers).
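+
+For instance, a minimal no-install setup might look like this (a sketch, assuming the default autoconf build-tree layout of `Release+Asserts/bin` produced by the configure flags above):
+```
+# Use llvm-config straight from the build tree instead of `sudo make install`.
+export PATH=$LLV8_ROOT/build-llvm/Release+Asserts/bin:"$PATH"
+llvm-config --version # Sanity check: should report the freshly built LLVM.
+```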
+
+### Building LLV8
+
+Finally, run make (substitute "release" for "debug" if you'd like to test performance):
+```
+cd $LLV8_ROOT/v8
+export LINK="" # V8's make uses a third-party linker anyway.
+make x64.debug -j9 i18nsupport=off gdbjit=off
+```
+
+Project documentation
 =============
-Checkout [depot tools](http://www.chromium.org/developers/how-tos/install-depot-tools), and run
-
- fetch v8
-
-This will checkout V8 into the directory `v8` and fetch all of its dependencies.
-To stay up to date, run
-
- git pull origin
- gclient sync
-
-For fetching all branches, add the following into your remote
-configuration in `.git/config`:
-
- fetch = +refs/branch-heads/*:refs/remotes/branch-heads/*
- fetch = +refs/tags/*:refs/tags/*
-
+Design documentation and build and run instructions can be found on the
+[LLV8 wiki](https://github.com/ispras/llv8/wiki).
-Contributing
+Usage example
 =============
-
-Please follow the instructions mentioned on the
-[V8 wiki](https://code.google.com/p/v8-wiki/wiki/Contributing).
+Let's compile a simple piece of JavaScript code with LLV8.
+
+```
+cat > a-plus-b.js
+```
+```
+var N = 10000; // Should be big enough to pass optimization threshold.
+
+function foo(a, b) {
+ return a + b;
+}
+
+var k = 1;
+for (var i = 0; i < N; i++) {
+ k += foo(i, i % k);
+}
+
+print(k);
+```
+Now run it through LLV8:
+```
+$LLV8_ROOT/v8/out/x64.debug/d8 a-plus-b.js --llvm-filter=*
+```
+It should spew a lot of debug information to stderr: mostly LLVM IR before and after various passes, plus disassembly. Here is an abridged stderr output:
+```
+...
+====================vvv Module AFTER optimization vvv====================
+; ModuleID = '0'
+target triple = "x86_64-unknown-linux-gnu"
+
+define x86_64_v8cc i8* @"0"(i8* %pointer_0, i8* nocapture readnone, i8* nocapture readnone, i8* %pointer_1, i8* %pointer_2, i8* %pointer_3) #0 gc "v8-gc" {
+BlockEntry0:
+ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 0)
+ %2 = ptrtoint i8* %pointer_2 to i64
+ %3 = and i64 %2, 1
+ %4 = icmp eq i64 %3, 0
+ br i1 %4, label %BlockCont, label %DeoptBlock
+
+BlockCont: ; preds = %BlockEntry0
+ %5 = ptrtoint i8* %pointer_1 to i64
+ %6 = and i64 %5, 1
+ %7 = icmp eq i64 %6, 0
+ br i1 %7, label %BlockCont1, label %DeoptBlock2
+
+DeoptBlock: ; preds = %BlockEntry0
+ %8 = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 1, i32 5, i8* null, i32 0, i8* %pointer_3, i8* %pointer_2, i8* %pointer_1, i8* %pointer_0)
+ unreachable
+
+BlockCont1: ; preds = %BlockCont
+ %9 = tail call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %2, i64 %5)
+ %10 = extractvalue { i64, i1 } %9, 1
+ br i1 %10, label %DeoptBlock4, label %BlockCont3
+
+DeoptBlock2: ; preds = %BlockCont
+ %11 = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 5, i8* null, i32 0, i8* %pointer_3, i8* %pointer_2, i8* %pointer_1, i8* %pointer_0)
+ unreachable
+
+BlockCont3: ; preds = %BlockCont1
+ %12 = extractvalue { i64, i1 } %9, 0
+ %13 = inttoptr i64 %12 to i8*
+ ret i8* %13
+
+DeoptBlock4: ; preds = %BlockCont1
+ %14 = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 3, i32 5, i8* null, i32 0, i8* %pointer_3, i8* %pointer_2, i8* %pointer_1, i8* %pointer_0)
+ unreachable
+}
+
+declare void @llvm.experimental.stackmap(i64, i32, ...)
+
+declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
+
+; Function Attrs: nounwind readnone
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) #1
+
+attributes #0 = { "no-frame-pointer-elim"="true" "put-constantpool-in-fn-section"="true" "put-jumptable-in-fn-section"="true" }
+attributes #1 = { nounwind readnone }
+====================^^^ Module AFTER optimization ^^^====================
+...
+ Version: 1
+ StackSizes:
+ (off:48232192, size:24)
+ Constants:
+ Records:
+ (#0, offset = 18, flags = 0, locations = [] live_outs = [])
+ (#1, offset = 42, flags = 0, locations = [(Register, rcx, off:0, size:8), (Register, rdx, off:0, size:8), (Register, rbx, off:0, size:8), (Register, rsi, off:0, size:8), ] live_outs = [])
+ (#2, offset = 47, flags = 0, locations = [(Register, rcx, off:0, size:8), (Register, rdx, off:0, size:8), (Register, rbx, off:0, size:8), (Register, rsi, off:0, size:8), ] live_outs = [])
+ (#3, offset = 52, flags = 0, locations = [(Register, rcx, off:0, size:8), (Register, rdx, off:0, size:8), (Register, rbx, off:0, size:8), (Register, rsi, off:0, size:8), ] live_outs = [])
+Instruction start: 0xa31d33a040
+0xa31d33a040 push rbp
+0xa31d33a041 mov rbp, rsp
+0xa31d33a044 push rsi
+0xa31d33a045 push rdi
+0xa31d33a046 mov rcx, qword ptr [rbp + 0x20]
+0xa31d33a04a mov rdx, qword ptr [rbp + 0x18]
+0xa31d33a04e mov rbx, qword ptr [rbp + 0x10]
+0xa31d33a052 test dl, 0x1
+0xa31d33a055 jne 0x13
+0xa31d33a057 test bl, 0x1
+0xa31d33a05a jne 0x13
+0xa31d33a05c mov rax, rdx
+0xa31d33a05f add rax, rbx
+0xa31d33a062 jo 0x10
+0xa31d33a064 pop rdi
+0xa31d33a065 pop rsi
+0xa31d33a066 pop rbp
+0xa31d33a067 ret 0x18
+0xa31d33a06a call -0x33406f
+0xa31d33a06f call -0x33406a
+0xa31d33a074 call -0x334065
+...
+RelocInfo (size = 6)
+0xa31d33a06b runtime entry (deoptimization bailout 0)
+0xa31d33a070 runtime entry (deoptimization bailout 1)
+0xa31d33a075 runtime entry (deoptimization bailout 2)
+
+Safepoints (size = 8)
+```
+And of course the answer is printed to stdout.
+```
+99989998
+```
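+
+The patch also adds a small regression-test driver, `llv8-regtests/runtests.py`, which runs each test under both plain V8 and LLV8 (compiling functions named `foo*` with LLV8) and compares the outputs. A hypothetical invocation against the debug build from above (the `--filter` value is just an example):
+```
+python llv8-regtests/runtests.py --filter typed-arrays \
+ $LLV8_ROOT/v8/out/x64.debug/d8
+```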
diff --git a/build/standalone.gypi b/build/standalone.gypi
index 7250579..d29a908 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -47,6 +47,7 @@
 'msvs_multi_core_compile%': '1',
 'mac_deployment_target%': '10.5',
 'release_extra_cflags%': '',
+ 'llvm_config': 'llvm-config',
 'variables': {
 'variables': {
 'variables': {
@@ -663,8 +664,16 @@
 '-fno-exceptions',
 '-fno-rtti',
 '-std=gnu++0x',
+ '<!@(<(llvm_config) --cxxflags)', #TODO(llvm)
 ],
- 'ldflags': [ '-pthread', ],
+ 'cflags_cc!': [ '-Wcast-qual', '-O3', '-Wformat-pedantic', ], #FIXME(llvm): this must be conditional
+ 'ldflags': [
+ '-pthread',
+ #FIXME(llvm): libs and ldflags should probably be in different places
+ '<!@(<(llvm_config) --ldflags --libs core mcjit interpreter analysis native --system-libs)',
+ '-lncurses', # FIXME(llvm): it seems we only need this for debug (also get rid later).
+ ],
+ 'cflags_cc!': [ '-Wcast-qual', '-O3' ], #FIXME(llvm): this must be conditional
 'conditions': [
 [ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
 or v8_target_arch=="mips64el")', {
@@ -908,6 +917,7 @@
 'SYMROOT': '<(DEPTH)/xcodebuild',
 'USE_HEADERMAP': 'NO',
 'OTHER_CFLAGS': [
+ "<!@(<(llvm_config) --cxxflags)", #works!
 '-fno-strict-aliasing',
 ],
 'WARNING_CFLAGS': [
@@ -927,7 +937,7 @@
 ['clang==1', {
 'xcode_settings': {
 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
- 'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x', # -std=gnu++0x
+ 'CLANG_CXX_LANGUAGE_STANDARD': 'c++11', # -std=gnu++0x
 },
 'conditions': [
 ['v8_target_arch=="x64" or v8_target_arch=="arm64" \
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index 7c96144..4334329 100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -1143,7 +1143,11 @@
 ['OS=="linux" and disable_glibcxx_debug==0', {
 # Enable libstdc++ debugging facilities to help catch problems
 # early, see http://crbug.com/65151 .
- 'defines': ['_GLIBCXX_DEBUG=1',],
+ # TODO(llvm): we shouldn't mix different _GLIBCXX_DEBUG values
+ # for compiling v8 and llvm. Otherwise we'll experience crashes
+ # due to different field offsets of the same field.
+ # So just disabling for now. It should be conditional though.
+ #defines': ['_GLIBCXX_DEBUG=1',],
 }],
 ['OS=="aix"', {
 'ldflags': [ '-Wl,-bbigtoc' ],
diff --git a/llv8-regtests/array/typed-arrays/uint16-array.js b/llv8-regtests/array/typed-arrays/uint16-array.js
new file mode 100644
index 0000000..c10a848
--- /dev/null
+++ b/llv8-regtests/array/typed-arrays/uint16-array.js
@@ -0,0 +1,15 @@
+// FIXME(llvm): this test fails with --llvm-filter=*
+function foo() {
+ var arr = new Uint16Array(100);
+ arr[0] = 3
+ for (var i = 1; i < arr.length; i++) {
+ arr[i] = arr[i - 1] * arr[i - 1]
+ }
+ return arr[arr.length / 4]
+}
+
+var a = 0
+var ITER = 1000
+for (var i = 1; i < ITER; i++)
+ a += foo()
+print(a)
diff --git a/llv8-regtests/c++/Makefile b/llv8-regtests/c++/Makefile
new file mode 100644
index 0000000..d79dd1b
--- /dev/null
+++ b/llv8-regtests/c++/Makefile
@@ -0,0 +1,29 @@
+.PHONY: clean all
+
+all: remdi.js il.js 407.js cunning-fox.js 257.js 111.js generating-funciton.js vector-double.js
+
+vector-double.js: vector-double.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -g -O3 vector-double.cc -o vector-double.js
+generating-funciton.js: generating-funciton.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -g -O3 generating-funciton.cc -o generating-funciton.js
+remdi.js: remdi.cc /tmp/gen.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -g -O3 remdi.cc -o remdi.js
+il.js: il.cc /tmp/gen.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -O1 il.cc -o il.js
+present.js: present.cc /tmp/gen.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -O1 present.cc -o present.js
+407.js: 407.cc /tmp/gen.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -O1 407.cc -o 407.js
+/tmp/gen.cc: gen-407.py
+ python gen-407.py
+cunning-fox.js: cunning-fox.cc
+ python gen-aa.py
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -O1 cunning-fox.cc -o cunning-fox.js
+multithread.js: multithread.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -O1 multithread.cc -o multithread.js
+257.js: 257.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -O1 257.cc -o 257.js
+111.js: 111.cc
+ /media/big/code-external/emsdk_portable/emscripten/master/em++ -std=c++11 -O3 111.cc -o 111.js
+clean:
+ rm *.js
diff --git a/llv8-regtests/c++/generating-funciton.cc b/llv8-regtests/c++/generating-funciton.cc
new file mode 100644
index 0000000..f7a8049
--- /dev/null
+++ b/llv8-regtests/c++/generating-funciton.cc
@@ -0,0 +1,47 @@
+#include <iostream>
+#include <vector>
+#include <set>
+#include <map>
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <climits>
+#include <iomanip>
+using namespace std;
+
+
+int power(int x, int n, int mod) {
+ int cur = 1;
+ while (n != 0) {
+ if (n % 2 != 0)
+ cur = (cur * x) % mod;
+ x = x * x;
+ n = n / 2;
+ }
+ return cur;
+}
+
+int factorial(int n) {
+ int cur = 1;
+ while (n > 1) {
+ cur *= n;
+ n--;
+ }
+ return cur;
+}
+
+int main() {
+ int n, M, k, N;
+ N = 100000 * 100;
+ vector<int> c = { -1, 345, 451, 12516, 1478, -675, -57854, -567, -65582, 22, -2134 };
+ M = c.size();
+ int sum = 0;
+ int x = 22;
+ int MOD = 10000007;
+ for (n = 0; n < N; n++) {
+ int k = n % M;
+ sum += (c[k] * power(x, n, MOD) + n / factorial(k)) % MOD;
+ }
+ cout << sum << endl;
+ return 0;
+}
diff --git a/llv8-regtests/c++/vector-double.cc b/llv8-regtests/c++/vector-double.cc
new file mode 100644
index 0000000..f53c511
--- /dev/null
+++ b/llv8-regtests/c++/vector-double.cc
@@ -0,0 +1,20 @@
+#include <iostream>
+#include <vector>
+
+constexpr int ITER = 1000;
+
+int main() {
+ int n, t;
+ double p = 0.18;
+ n = 1000;
+ t = 5;
+
+ std::vector<double> P(n);
+ for (int ii = 0; ii < ITER; ii++) {
+ P[0] = p;
+ for (int j = 1; j < n; j++)
+ P[j] = P[j - 1] * P[j - 1];
+ }
+ std::cout << P[t] << std::endl;
+ return 0;
+}
diff --git a/llv8-regtests/runtests.py b/llv8-regtests/runtests.py
new file mode 100755
index 0000000..e282c68
--- /dev/null
+++ b/llv8-regtests/runtests.py
@@ -0,0 +1,82 @@
+#!/usr/bin/python
+
+import argparse
+import os
+import sys
+import subprocess
+import inspect
+
+file_suffix = ".js"
+v8_options = ["--allow-natives-syntax", "--expose-gc",]
+llv8_options = v8_options + [
+ "--llvm-filter=foo*",
+ "--noturbo",
+ "--noturbo-asm",
+# "--noconcurrent-recompilation",
+# "--noconcurrent-osr",
+# "--noinline-new",
+# "--nouse-osr",
+# "--nouse-inlining",
+ ]
+
+null_file = open("/dev/null", "w")
+
+arg_parser = argparse.ArgumentParser(
+ description="Run v8_path on tests and compare llv8 outputs with pure v8. "
+ "Tests are expected to have " + file_suffix + " suffix and contain "
+ "a function foo() which will be llvmed (other functions won't be)." )
+arg_parser.add_argument('--filter',
+ help="Use only tests which have FILTER as a substring")
+arg_parser.add_argument('--exclude',
+ help="The set of tests to be skipped (whose filename contains the substring)")
+arg_parser.add_argument('--src_root',
+ default=os.path.dirname(os.path.realpath(__file__)),
+ help="Root directory with tests")
+arg_parser.add_argument('v8_path',
+ nargs='?', # 0 or 1
+ default="/home/vlad/code/blessed-v8/v8/out/x64.debug/d8")
+args = arg_parser.parse_args()
+
+print args
+v8_path = args.v8_path
+src_root = args.src_root
+
+class WrongAnswerException(Exception):
+ pass
+
+def do_test(filename):
+ llv8_out = subprocess.check_output([v8_path] + llv8_options + [filename], stderr=null_file)
+ v8_out = subprocess.check_output([v8_path] + v8_options + [filename], stderr=null_file)
+ split_lambda = lambda output: filter(lambda x: x, output.split("\n"))
+ llv8_out = split_lambda(llv8_out)
+ v8_out = split_lambda(v8_out)
+ if len(v8_out) == 0 and len(llv8_out) == 0:
+ return
+ elif len(v8_out) == 0 or len(llv8_out) == 0:
+ raise WrongAnswerException("llv8 error: WA | empty output")
+ elif llv8_out[-1] != v8_out[-1]:
+ print "llv8:\t", llv8_out[-1]
+ print "v8:\t", v8_out[-1]
+ raise WrongAnswerException("llv8 error: WA")
+
+failed = []
+tested_cnt = 0
+for root, dirs, files in os.walk(src_root):
+ lst = [root + '/' + i for i in files if i.endswith(file_suffix)]
+ for src_file in lst:
+ if args.exclude and args.exclude in src_file: continue
+ if args.filter and args.filter not in src_file: continue
+ tested_cnt += 1
+ try:
+ print src_file
+ do_test(src_file)
+ print "\tOK"
+ except Exception as e:
+ failed += [src_file]
+ print "\tFAILED!"
+ print e
+print "\n=======RESULTS======="
+print str(len(failed)) + "/" + str(tested_cnt), "tests failed"
+for test in failed:
+ print test
+null_file.close()
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 87cbf09..5ee5334 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -420,14 +420,14 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
 LPlatformChunk* LChunkBuilder::Build() {
 DCHECK(is_unused());
 chunk_ = new(zone()) LPlatformChunk(info(), graph());
- LPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk());
 status_ = BUILDING;
 // If compiling for OSR, reserve space for the unoptimized frame,
 // which will be subsumed into this frame.
 if (graph()->has_osr()) {
 for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+ chunk()->GetNextSpillIndex(GENERAL_REGISTERS);
 }
 }
@@ -439,7 +439,7 @@ LPlatformChunk* LChunkBuilder::Build() {
 if (is_aborted()) return NULL;
 }
 status_ = DONE;
- return chunk_;
+ return chunk();
 }
@@ -494,40 +494,40 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) {
 LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
 return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ ? chunk()->DefineConstantOperand(HConstant::cast(value))
 : Use(value);
 }
 LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
 return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ ? chunk()->DefineConstantOperand(HConstant::cast(value))
 : UseAtStart(value);
 }
 LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
 return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ ? chunk()->DefineConstantOperand(HConstant::cast(value))
 : UseRegister(value);
 }
 LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
 return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ ? chunk()->DefineConstantOperand(HConstant::cast(value))
 : UseRegisterAtStart(value);
 }
 LOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
+ return chunk()->DefineConstantOperand(HConstant::cast(value));
 }
 LOperand* LChunkBuilder::UseAny(HValue* value) {
 return value->IsConstant()
- ? chunk_->DefineConstantOperand(HConstant::cast(value))
+ ? chunk()->DefineConstantOperand(HConstant::cast(value))
 : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
 }
@@ -702,7 +702,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
 bool does_deopt = false;
 if (right_value->IsConstant()) {
 HConstant* constant = HConstant::cast(right_value);
- right = chunk_->DefineConstantOperand(constant);
+ right = chunk()->DefineConstantOperand(constant);
 constant_value = constant->Integer32Value() & 0x1f;
 // Left shifts can deoptimize if we shift by > 0 and the result cannot be
 // truncated to smi.
@@ -810,7 +810,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
 argument_count_ = pred->argument_count();
 }
 HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
+ int start = chunk()->instructions()->length();
 while (current != NULL && !is_aborted()) {
 // Code for constants in registers is generated lazily.
 if (!current->EmitAtUses()) {
@@ -818,7 +818,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
 }
 current = current->next();
 }
- int end = chunk_->instructions()->length() - 1;
+ int end = chunk()->instructions()->length() - 1;
 if (end >= start) {
 block->set_first_instruction_index(start);
 block->set_last_instruction_index(end);
@@ -847,7 +847,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
 LInstruction* dummy =
 new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
 dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
+ chunk()->AddInstruction(dummy, current_block_);
 }
 } else {
 HBasicBlock* successor;
@@ -913,7 +913,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
 if (FLAG_stress_environments && !instr->HasEnvironment()) {
 instr = AssignEnvironment(instr);
 }
- chunk_->AddInstruction(instr, current_block_);
+ chunk()->AddInstruction(instr, current_block_);
 if (instr->IsCall() || instr->IsPrologue()) {
 HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
@@ -924,7 +924,13 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
 }
 LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
 bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
- chunk_->AddInstruction(bailout, current_block_);
+ chunk()->AddInstruction(bailout, current_block_);
+ if (instruction_needing_environment != NULL) {
+ // Store the lazy deopt environment with the instruction if needed.
+ // Right now it is only used for LInstanceOfKnownGlobal.
+ instruction_needing_environment->
+ SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+ }
 }
 }
@@ -2627,7 +2633,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
 inner->BindContext(instr->closure_context());
 inner->set_entry(instr);
 current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
+ chunk()->AddInlinedFunction(instr->shared());
 return NULL;
 }
diff --git a/src/arm64/lithium-arm64.cc b/src/arm64/lithium-arm64.cc
index 5cc5b9d..f27e362 100644
--- a/src/arm64/lithium-arm64.cc
+++ b/src/arm64/lithium-arm64.cc
@@ -434,7 +434,7 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
 LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
- return chunk_->DefineConstantOperand(HConstant::cast(value));
+ return chunk()->DefineConstantOperand(HConstant::cast(value));
 }
@@ -579,7 +579,7 @@ LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
 LPlatformChunk* LChunkBuilder::Build() {
 DCHECK(is_unused());
 chunk_ = new(zone()) LPlatformChunk(info_, graph_);
- LPhase phase("L_Building chunk", chunk_);
+ LPhase phase("L_Building chunk", chunk());
 status_ = BUILDING;
 // If compiling for OSR, reserve space for the unoptimized frame,
@@ -588,7 +588,7 @@ LPlatformChunk* LChunkBuilder::Build() {
 // TODO(all): GetNextSpillIndex just increments a field. It has no other
 // side effects, so we should get rid of this loop.
 for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
- chunk_->GetNextSpillIndex();
+ chunk()->GetNextSpillIndex();
 }
 }
@@ -598,7 +598,7 @@ LPlatformChunk* LChunkBuilder::Build() {
 if (is_aborted()) return NULL;
 }
 status_ = DONE;
- return chunk_;
+ return chunk();
 }
@@ -653,7 +653,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
 // Translate hydrogen instructions to lithium ones for the current block.
 HInstruction* current = block->first();
- int start = chunk_->instructions()->length();
+ int start = chunk()->instructions()->length();
 while ((current != NULL) && !is_aborted()) {
 // Code for constants in registers is generated lazily.
 if (!current->EmitAtUses()) {
@@ -661,7 +661,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
 }
 current = current->next();
 }
- int end = chunk_->instructions()->length() - 1;
+ int end = chunk()->instructions()->length() - 1;
 if (end >= start) {
 block->set_first_instruction_index(start);
 block->set_last_instruction_index(end);
@@ -689,7 +689,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
 LInstruction* dummy =
 new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
 dummy->set_hydrogen_value(current);
- chunk_->AddInstruction(dummy, current_block_);
+ chunk()->AddInstruction(dummy, current_block_);
 }
 } else {
 HBasicBlock* successor;
@@ -755,7 +755,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
 if (FLAG_stress_environments && !instr->HasEnvironment()) {
 instr = AssignEnvironment(instr);
 }
- chunk_->AddInstruction(instr, current_block_);
+ chunk()->AddInstruction(instr, current_block_);
 if (instr->IsCall() || instr->IsPrologue()) {
 HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
@@ -2007,7 +2007,7 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
 LParameter* result = new(zone()) LParameter;
 if (instr->kind() == HParameter::STACK_PARAMETER) {
- int spill_index = chunk_->GetParameterStackSlot(instr->index());
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
 return DefineAsSpilled(result, spill_index);
 } else {
 DCHECK(info()->IsStub());
@@ -2718,7 +2718,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
 int env_index = instr->index();
 int spill_index = 0;
 if (instr->environment()->is_parameter_index(env_index)) {
- spill_index = chunk_->GetParameterStackSlot(env_index);
+ spill_index = chunk()->GetParameterStackSlot(env_index);
 } else {
 spill_index = env_index - instr->environment()->first_local_index();
 if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
diff --git a/src/assembler.h b/src/assembler.h
index 50e9906..801b4af 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -431,6 +431,10 @@ class RelocInfo {
 : pc_(pc), rmode_(rmode), data_(data), host_(host) {
 }
+ RelocInfo(Mode rmode, intptr_t data = NULL)
+ : pc_(NULL), rmode_(rmode), data_(data), host_(NULL) {
+ }
+
 static inline bool IsRealRelocMode(Mode mode) {
 return mode >= FIRST_REAL_RELOC_MODE &&
 mode <= LAST_REAL_RELOC_MODE;
@@ -515,6 +519,7 @@ class RelocInfo {
 void set_pc(byte* pc) { pc_ = pc; }
 Mode rmode() const { return rmode_; }
 intptr_t data() const { return data_; }
+ void set_data(intptr_t data) { data_ = data; }
 Code* host() const { return host_; }
 void set_host(Code* host) { host_ = host; }
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index eafc6d3..d533cce 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -9,12 +9,14 @@
 #include "src/hydrogen.h"
 #include "src/ic/ic.h"
 #include "src/lithium.h"
+#include "src/llvm/llvm-chunk.h"
+#include "src/low-chunk.h"
 namespace v8 {
 namespace internal {
-static LChunk* OptimizeGraph(HGraph* graph) {
+static LowChunk* OptimizeGraph(HGraph* graph) {
 DisallowHeapAllocation no_allocation;
 DisallowHandleAllocation no_handles;
 DisallowHandleDereference no_deref;
@@ -24,7 +26,14 @@ static LChunk* OptimizeGraph(HGraph* graph) {
 if (!graph->Optimize(&bailout_reason)) {
 FATAL(GetBailoutReason(bailout_reason));
 }
- LChunk* chunk = LChunk::NewChunk(graph);
+ LowChunk* chunk;
+ if (!graph->info()->closure().is_null() &&
+ graph->info()->closure()->PassesFilter(FLAG_llvm_filter)) {
+ chunk = LLVMChunk::NewChunk(graph);
+ // TODO(llvm): add logging
+ } else {
+ chunk = LChunk::NewChunk(graph);
+ }
 if (chunk == NULL) {
 FATAL(GetBailoutReason(graph->info()->bailout_reason()));
 }
@@ -299,7 +308,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
 Zone zone;
 CompilationInfo info(stub, isolate, &zone);
 CodeStubGraphBuilder<Stub> builder(&info);
- LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ LowChunk* chunk = OptimizeGraph(builder.CreateGraph());
 Handle<Code> code = chunk->Codegen();
 if (FLAG_profile_hydrogen_code_stub_compilation) {
 OFStream os(stdout);
diff --git a/src/compiler.cc b/src/compiler.cc
index 4f4a40a..7a2357b 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -20,6 +20,7 @@
 #include "src/interpreter/interpreter.h"
 #include "src/isolate-inl.h"
 #include "src/lithium.h"
+#include "src/llvm/llvm-chunk.h"
 #include "src/log-inl.h"
 #include "src/messages.h"
 #include "src/parser.h"
@@ -546,7 +547,17 @@ OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
 BailoutReason bailout_reason = kNoReason;
 if (graph_->Optimize(&bailout_reason)) {
- chunk_ = LChunk::NewChunk(graph_);
+ if (!graph_->info()->closure().is_null() &&
+ graph_->info()->closure()->PassesFilter(FLAG_llvm_filter)) {
+ // We can't build the LLVM chunk here if concurrent recompilation is
+ // enabled. It must be moved to the GenerateCode() function.
+ //chunk_ = LLVMChunk::NewChunk(graph_);
+ // TODO(llvm): add logging
+ // FIXME(llvm): We actually need to handle non-succeeded cases.
+ return SetLastStatus(SUCCEEDED);
+ } else {
+ chunk_ = LChunk::NewChunk(graph_);
+ }
 if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
 } else if (bailout_reason != kNoReason) {
 graph_builder_->Bailout(bailout_reason);
@@ -574,13 +585,19 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
 DisallowJavascriptExecution no_js(isolate());
 { // Scope for timer.
 Timer timer(this, &time_taken_to_codegen_);
- DCHECK(chunk_ != NULL);
+ // DCHECK(chunk_ != NULL);
 DCHECK(graph_ != NULL);
 // Deferred handles reference objects that were accessible during
 // graph creation. To make sure that we don't encounter inconsistencies
 // between graph creation and code generation, we disallow accessing
 // objects through deferred handles during the latter, with exceptions.
 DisallowDeferredHandleDereference no_deferred_handle_deref;
+ if (!graph_->info()->closure().is_null() &&
+ graph_->info()->closure()->PassesFilter(FLAG_llvm_filter)) {
+ chunk_ = LLVMChunk::NewChunk(graph_);
+ }
+ // FIXME(llvm). chunk can be null, we need to abort if so.
+ DCHECK(chunk_ != NULL);
 Handle<Code> optimized_code = chunk_->Codegen();
 if (optimized_code.is_null()) {
 if (info()->bailout_reason() == kNoReason) {
diff --git a/src/compiler.h b/src/compiler.h
index cc1a955..63d4f20 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -10,6 +10,7 @@
 #include "src/bailout-reason.h"
 #include "src/compilation-dependencies.h"
 #include "src/signature.h"
+//#include "src/low-chunk.h"
 #include "src/zone.h"
 namespace v8 {
@@ -540,6 +541,7 @@ class CompilationHandleScope BASE_EMBEDDED {
 class HGraph;
 class HOptimizedGraphBuilder;
 class LChunk;
+class LowChunk;
 // A helper class that calls the three compilation phases in
 // Crankshaft and keeps track of its state. The three phases
@@ -590,7 +592,7 @@ class OptimizedCompileJob: public ZoneObject {
 CompilationInfo* info_;
 HOptimizedGraphBuilder* graph_builder_;
 HGraph* graph_;
- LChunk* chunk_;
+ LowChunk* chunk_;
 base::TimeDelta time_taken_to_create_graph_;
 base::TimeDelta time_taken_to_optimize_;
 base::TimeDelta time_taken_to_codegen_;
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index 8a936ac..fc036d7 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -1218,82 +1218,6 @@ static int TranslatePosition(int original_position,
 }
-// Auto-growing buffer for writing relocation info code section. This buffer
-// is a simplified version of buffer from Assembler. Unlike Assembler, this
-// class is platform-independent and it works without dealing with instructions.
-// As specified by RelocInfo format, the buffer is filled in reversed order:
-// from upper to lower addresses.
-// It uses NewArray/DeleteArray for memory management.
-class RelocInfoBuffer {
- public:
- RelocInfoBuffer(int buffer_initial_capicity, byte* pc) {
- buffer_size_ = buffer_initial_capicity + kBufferGap;
- buffer_ = NewArray<byte>(buffer_size_);
-
- reloc_info_writer_.Reposition(buffer_ + buffer_size_, pc);
- }
- ~RelocInfoBuffer() {
- DeleteArray(buffer_);
- }
-
- // As specified by RelocInfo format, the buffer is filled in reversed order:
- // from upper to lower addresses.
- void Write(const RelocInfo* rinfo) {
- if (buffer_ + kBufferGap >= reloc_info_writer_.pos()) {
- Grow();
- }
- reloc_info_writer_.Write(rinfo);
- }
-
- Vector<byte> GetResult() {
- // Return the bytes from pos up to end of buffer.
- int result_size =
- static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer_.pos());
- return Vector<byte>(reloc_info_writer_.pos(), result_size);
- }
-
- private:
- void Grow() {
- // Compute new buffer size.
- int new_buffer_size;
- if (buffer_size_ < 2 * KB) {
- new_buffer_size = 4 * KB;
- } else {
- new_buffer_size = 2 * buffer_size_;
- }
- // Some internal data structures overflow for very large buffers,
- // they must ensure that kMaximalBufferSize is not too large.
- if (new_buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
- }
-
- // Set up new buffer.
- byte* new_buffer = NewArray<byte>(new_buffer_size);
-
- // Copy the data.
- int curently_used_size =
- static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- MemMove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
-
- reloc_info_writer_.Reposition(
- new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.last_pc());
-
- DeleteArray(buffer_);
- buffer_ = new_buffer;
- buffer_size_ = new_buffer_size;
- }
-
- RelocInfoWriter reloc_info_writer_;
- byte* buffer_;
- int buffer_size_;
-
- static const int kBufferGap = RelocInfoWriter::kMaxSize;
- static const int kMaximalBufferSize = 512*MB;
-};
-
-
 // Patch positions in code (changes relocation info section) and possibly
 // returns new instance of code.
 static Handle<Code> PatchPositionsInCode(
diff --git a/src/debug/liveedit.h b/src/debug/liveedit.h
index 29fe605..18bf474 100644
--- a/src/debug/liveedit.h
+++ b/src/debug/liveedit.h
@@ -27,6 +27,7 @@
 #include "src/allocation.h"
 #include "src/compiler.h"
+#include "src/v8.h"
 namespace v8 {
 namespace internal {
@@ -364,7 +365,82 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
 friend class JSArrayBasedStruct<SharedInfoWrapper>;
 };
-} // namespace internal
-} // namespace v8
+// Auto-growing buffer for writing relocation info code section. This buffer
+// is a simplified version of buffer from Assembler. Unlike Assembler, this
+// class is platform-independent and it works without dealing with instructions.
+// As specified by RelocInfo format, the buffer is filled in reversed order:
+// from upper to lower addresses.
+// It uses NewArray/DeleteArray for memory management.
+class RelocInfoBuffer {
+ public:
+ RelocInfoBuffer(int buffer_initial_capicity, byte* pc) {
+ buffer_size_ = buffer_initial_capicity + kBufferGap;
+ buffer_ = NewArray<byte>(buffer_size_);
+
+ reloc_info_writer_.Reposition(buffer_ + buffer_size_, pc);
+ }
+ ~RelocInfoBuffer() {
+ DeleteArray(buffer_);
+ }
+
+ // As specified by RelocInfo format, the buffer is filled in reversed order:
+ // from upper to lower addresses.
+ void Write(const RelocInfo* rinfo) {
+ if (buffer_ + kBufferGap >= reloc_info_writer_.pos()) {
+ Grow();
+ }
+ reloc_info_writer_.Write(rinfo);
+ }
+
+ Vector<byte> GetResult() {
+ // Return the bytes from pos up to end of buffer.
+ int result_size =
+ static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer_.pos());
+ return Vector<byte>(reloc_info_writer_.pos(), result_size);
+ }
+
+ private:
+ void Grow() {
+ // Compute new buffer size.
+ int new_buffer_size;
+ if (buffer_size_ < 2 * KB) {
+ new_buffer_size = 4 * KB;
+ } else {
+ new_buffer_size = 2 * buffer_size_;
+ }
+ // Some internal data structures overflow for very large buffers,
+ // they must ensure that kMaximalBufferSize is not too large.
+ if (new_buffer_size > kMaximalBufferSize) {
+ V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
+ }
+
+ // Set up new buffer.
+ byte* new_buffer = NewArray<byte>(new_buffer_size);
+
+ // Copy the data.
+ int curently_used_size =
+ static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
+ MemMove(new_buffer + new_buffer_size - curently_used_size,
+ reloc_info_writer_.pos(), curently_used_size);
+
+ reloc_info_writer_.Reposition(
+ new_buffer + new_buffer_size - curently_used_size,
+ reloc_info_writer_.last_pc());
+
+ DeleteArray(buffer_);
+ buffer_ = new_buffer;
+ buffer_size_ = new_buffer_size;
+ }
+
+ RelocInfoWriter reloc_info_writer_;
+ byte* buffer_;
+ int buffer_size_;
+
+ static const int kBufferGap = RelocInfoWriter::kMaxSize;
+ static const int kMaximalBufferSize = 512*MB;
+};
+
+
+} } // namespace v8::internal
 #endif /* V8_DEBUG_LIVEEDIT_H_ */
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index a234728..acba3e9 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1776,7 +1776,10 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
 unsigned stack_slots = compiled_code_->stack_slots();
 unsigned outgoing_size =
 ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
- CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
+ // For llvmed code, the reported number of stack_slots does not include the
+ // slots used for parameter passing although they are statically allocated.
+ CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size
+ || compiled_code_->is_llvmed());
 }
 return result;
 }
diff --git a/src/factory.cc b/src/factory.cc
index c66f90a..0a86af2 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1379,6 +1379,78 @@ Handle<Code> Factory::NewCodeRaw(int object_size, bool immovable) {
 Code);
 }
+Handle<Code> Factory::NewLLVMCode(const CodeDesc& desc,
+ const CodeDesc& safepoint_table_desc,
+ const Vector<byte>* reloc_data,
+ Code::Flags flags,
+ bool immovable,
+ int prologue_offset,
+ bool is_debug) {
+ DCHECK(desc.reloc_size == 0);
+ Handle<ByteArray> reloc_info = NewByteArray(reloc_data->length(), TENURED);
+
+ auto align = kIntSize;
+ int delta = (align - (desc.instr_size & (align - 1))) & (align - 1);
+ auto total_instruction_size = desc.instr_size +
+ safepoint_table_desc.instr_size + delta;
+ // Compute size.
+ int body_size = RoundUp(total_instruction_size, kObjectAlignment);
+ int obj_size = Code::SizeFor(body_size);
+
+ Handle<Code> code = NewCodeRaw(obj_size, immovable);
+ DCHECK(isolate()->code_range() == NULL ||
+ !isolate()->code_range()->valid() ||
+ isolate()->code_range()->contains(code->address()));
+
+ // The code object has not been fully initialized yet. We rely on the
+ // fact that no allocation will happen from this point on.
+ DisallowHeapAllocation no_gc;
+ code->set_gc_metadata(Smi::FromInt(0));
+ code->set_ic_age(isolate()->heap()->global_ic_age());
+ code->set_instruction_size(total_instruction_size);
+ code->set_relocation_info(*reloc_info);
+ code->set_flags(flags);
+ code->set_raw_kind_specific_flags1(0);
+ code->set_raw_kind_specific_flags2(0);
+ code->set_is_llvmed(true);
+ code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_raw_type_feedback_info(Smi::FromInt(0));
+ code->set_next_code_link(*undefined_value());
+ code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_prologue_offset(prologue_offset);
+ // FIXME(llvm): bad bad not good
+ code->set_is_crankshafted(false);
+ code->set_is_crankshafted(true);
+ code->set_safepoint_table_offset(desc.instr_size + delta);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ code->set_marked_for_deoptimization(false);
+ }
+
+ if (is_debug) {
+ DCHECK(code->kind() == Code::FUNCTION);
+ code->set_has_debug_break_slots(true);
+ }
+
+// desc.origin->PopulateConstantPool(*constant_pool);
+// code->set_constant_pool(*constant_pool);
+
+// // Allow self references to created code object by patching the handle to
+// // point to the newly allocated Code object.
+// if (!self_ref.is_null()) *(self_ref.location()) = *code;
+
+ // Migrate generated code.
+ // The generated code can contain Object** values (typically from handles)
+ // that are dereferenced during the copy to point directly to the actual heap
+ // objects. These pointers can include references to the code object itself,
+ // through the self_reference parameter.
+ // FIXME(llvm): it dereferences desc.origin
+ code->CopyFrom(desc, &safepoint_table_desc, reloc_data, delta);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) code->ObjectVerify();
+#endif
+ return code;
+}
 Handle<Code> Factory::NewCode(const CodeDesc& desc,
 Code::Flags flags,
diff --git a/src/factory.h b/src/factory.h
index 22baf45..4d2a09f 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -537,6 +537,14 @@ class Factory final {
 int prologue_offset = Code::kPrologueOffsetNotSet,
 bool is_debug = false);
+ Handle<Code> NewLLVMCode(const CodeDesc& desc,
+ const CodeDesc& safepoint_table_desc,
+ const Vector<byte>* reloc_data,
+ Code::Flags flags,
+ bool immovable = false,
+ int prologue_offset = Code::kPrologueOffsetNotSet,
+ bool is_debug = false);
+
 Handle<Code> CopyCode(Handle<Code> code);
 Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 7ea795f..928299c 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -400,6 +400,12 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
 "do not emit check maps for constant values that have a leaf map, "
 "deoptimize the optimized code if the layout of the maps changes.")
+// Flags for LLVM
+DEFINE_STRING(llvm_filter, "~", "filter for functions to be lowered to "
+ "llvm instead of Lithium")
+DEFINE_BOOL(phi_normalize, true, "enable phi normalization phase"
+ " (it's a temporary hack; phis must always be normalized)")
+
 // Flags for TurboFan.
 DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
 DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
diff --git a/src/frames.cc b/src/frames.cc
index 7b84797..7c0a6c0 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -645,11 +645,18 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
 // Visit the outgoing parameters.
 Object** parameters_base = &Memory::Object_at(sp());
- Object** parameters_limit = &Memory::Object_at(
+ Object** locals_base = &Memory::Object_at(
 fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
+ Object** parameters_limit = nullptr;
+
+ if (!code->is_llvmed())
+ parameters_limit = locals_base;
+ else
+ parameters_limit = parameters_base + safepoint_entry.num_function_args();
 // Visit the parameters that may be on top of the saved registers.
 if (safepoint_entry.argument_count() > 0) {
+ if (code->is_llvmed()) UNIMPLEMENTED();
 v->VisitPointers(parameters_base,
 parameters_base + safepoint_entry.argument_count());
 parameters_base += safepoint_entry.argument_count();
@@ -688,7 +695,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
 int byte_index = index >> kBitsPerByteLog2;
 int bit_index = index & (kBitsPerByte - 1);
 if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
- v->VisitPointer(parameters_limit + index);
+ v->VisitPointer(locals_base + index);
 }
 }
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index c522d2c..ff89762 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -2869,6 +2869,7 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
 }
 SetCallPosition(expr, arg_count);
+ __ movp(rbx, Immediate(0)); // FIXME: Maybe redundant.
 Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, call_type).code();
 __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
 __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 4482155..2cec8f3 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -9,6 +9,7 @@
 #include "src/elements.h"
 #include "src/factory.h"
 #include "src/hydrogen-infer-representation.h"
+#include "src/llvm/llvm-chunk.h"
 #if V8_TARGET_ARCH_IA32
 #include "src/ia32/lithium-ia32.h" // NOLINT
@@ -42,6 +43,12 @@ namespace internal {
 HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
 #undef DEFINE_COMPILE
+#define DEFINE_COMPILE(type) \
+ void H##type::CompileToLLVM(LLVMChunkBuilder* builder) { \
+ builder->Do##type(this); \
+ }
+HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
 Isolate* HValue::isolate() const {
 DCHECK(block() != NULL);
@@ -718,6 +725,14 @@ bool HInstruction::Dominates(HInstruction* other) {
 return false;
 }
+bool HValue::IsReacheableFrom(HValue* other) {
+ DCHECK(IsInstruction() && other->IsInstruction());
+ if (other->block() == block()) {
+ return HInstruction::cast(other)->Dominates(HInstruction::cast(this));
+ } else {
+ return block()->IsReacheableFrom(other->block());
+ }
+}
 #ifdef DEBUG
 void HInstruction::Verify() {
@@ -1581,6 +1596,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
 }
 }
 if (op() == kMathFloor && value()->IsDiv() && value()->HasOneUse()) {
+ // TODO(llvm): there is an issue connected with MathFloorOfDiv,
+ // so skip this canonicalization for now.
+ return this;
 HDiv* hdiv = HDiv::cast(value());
 HValue* left = hdiv->left();
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index f7acbb5..77dbe4a 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -5,9 +5,13 @@
 #ifndef V8_HYDROGEN_INSTRUCTIONS_H_
 #define V8_HYDROGEN_INSTRUCTIONS_H_
+#include "src/llvm/llvm-headers.h"
+
 #include <cstring>
 #include <iosfwd>
+#include "src/v8.h"
+
 #include "src/allocation.h"
 #include "src/base/bits.h"
 #include "src/bit-vector.h"
@@ -35,6 +39,7 @@ class HStoreNamedField;
 class HValue;
 class LInstruction;
 class LChunkBuilder;
+class LLVMChunkBuilder;
 #define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
 V(ArithmeticBinaryOperation) \
@@ -199,6 +204,7 @@ class LChunkBuilder;
 #define DECLARE_CONCRETE_INSTRUCTION(type) \
 LInstruction* CompileToLithium(LChunkBuilder* builder) final; \
+ void CompileToLLVM(LLVMChunkBuilder* builder) final; \
 static H##type* cast(HValue* value) { \
 DCHECK(value->Is##type()); \
 return reinterpret_cast<H##type*>(value); \
@@ -502,7 +508,8 @@ class HValue : public ZoneObject {
 #ifdef DEBUG
 range_poisoned_(false),
 #endif
- flags_(0) {}
+ flags_(0),
+ llvm_value_(nullptr) {}
 virtual ~HValue() {}
 virtual SourcePosition position() const { return SourcePosition::Unknown(); }
@@ -519,6 +526,11 @@ class HValue : public ZoneObject {
 int id() const { return id_; }
 void set_id(int id) { id_ = id; }
+ llvm::Value* llvm_value() const { return llvm_value_; }
+ void set_llvm_value(llvm::Value* llvm_instruction) {
+ llvm_value_ = llvm_instruction;
+ }
+
 HUseIterator uses() const { return HUseIterator(use_list_); }
 virtual bool EmitAtUses() { return false; }
@@ -770,6 +782,8 @@ class HValue : public ZoneObject {
 ? FAIL_ON_MINUS_ZERO : TREAT_MINUS_ZERO_AS_ZERO;
 }
+ bool IsReacheableFrom(HValue* other);
+
 protected:
 // This function must be overridden for instructions with flag kUseGVN, to
 // compare the non-Operand parts of the instruction.
@@ -860,6 +874,8 @@ class HValue : public ZoneObject {
 int flags_;
 GVNFlagSet changes_flags_;
 GVNFlagSet depends_on_flags_;
+ // FIXME(llvm): not sure who takes care of deallocation.
+ llvm::Value* llvm_value_;
 private:
 virtual bool IsDeletable() const { return false; }
@@ -1069,6 +1085,9 @@ class HInstruction : public HValue {
 HInstruction* previous() const { return previous_; }
 std::ostream& PrintTo(std::ostream& os) const override; // NOLINT
+
+ llvm::Value* llvm_value() const { return llvm_value_; }
+
 virtual std::ostream& PrintDataTo(std::ostream& os) const; // NOLINT
 bool IsLinked() const { return block() != NULL; }
@@ -1116,6 +1135,8 @@ class HInstruction : public HValue {
 bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
 virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
+ // TODO(llvm): return type?
+ virtual void CompileToLLVM(LLVMChunkBuilder* builder) = 0;
 #ifdef DEBUG
 void Verify() override;
diff --git a/src/hydrogen-osr.h b/src/hydrogen-osr.h
index 6a63988a..9e58771 100644
--- a/src/hydrogen-osr.h
+++ b/src/hydrogen-osr.h
@@ -38,6 +38,10 @@ class HOsrBuilder : public ZoneObject {
 int UnoptimizedFrameSlots() const {
 return unoptimized_frame_slots_;
 }
+
+ HBasicBlock* osr_entry() {
+ return osr_entry_;
+ }
 bool HasOsrEntryAt(IterationStatement* statement);
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index fc3aa4c..5b1361e 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -82,8 +82,11 @@ HBasicBlock::HBasicBlock(HGraph* graph)
 first_instruction_index_(-1),
 last_instruction_index_(-1),
 deleted_phis_(4, graph->zone()),
+ defined_consts_(4, graph->zone()),
 parent_loop_header_(NULL),
 inlined_entry_block_(NULL),
+ llvm_start_basic_block_(nullptr),
+ llvm_end_basic_block_(nullptr),
 is_inline_return_target_(false),
 is_reachable_(true),
 dominates_loop_successors_(false),
@@ -330,6 +333,15 @@ void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
 succ_block->MarkUnreachable();
 }
+void HBasicBlock::ReverseDFS(HBasicBlock* v, GrowableBitVector& visited,
+ Zone* zone) {
+ visited.Add(v->block_id(), zone);
+ for (HPredecessorIterator it(v); !it.Done(); it.Advance()) {
+ HBasicBlock* predecessor = it.Current();
+ if (!visited.Contains(predecessor->block_id()))
+ ReverseDFS(predecessor, visited, zone);
+ }
+}
 void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
 if (HasPredecessor()) {
@@ -4584,6 +4596,10 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
 // made blocks unreachable that were previously reachable.
 Run<HMarkUnreachableBlocksPhase>();
+ // The stages above might have produced redundant phis
+ // which we do not tolerate in llv8.
+ Run<HRedundantPhiEliminationPhase>();
+
 return true;
 }
@@ -11172,9 +11188,11 @@ HValue* HGraphBuilder::BuildBinaryOperation(
 break;
 case Token::BIT_OR: {
 HValue *operand, *shift_amount;
+ bool is_llvm = !graph()->info()->closure().is_null() &&
+ graph()->info()->closure()->PassesFilter(FLAG_llvm_filter);
 if (left_type->Is(Type::Signed32()) &&
 right_type->Is(Type::Signed32()) &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
+ MatchRotateRight(left, right, &operand, &shift_amount) && !is_llvm) {
 instr = AddUncasted<HRor>(operand, shift_amount, strength);
 } else {
 instr = AddUncasted<HBitwise>(op, left, right, strength);
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 464d857..c7599af 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -5,6 +5,10 @@
 #ifndef V8_HYDROGEN_H_
 #define V8_HYDROGEN_H_
+#include "src/llvm/llvm-headers.h"
+
+#include "src/v8.h"
+
 #include "src/accessors.h"
 #include "src/allocation.h"
 #include "src/ast.h"
@@ -62,7 +66,27 @@ class HBasicBlock final : public ZoneObject {
 void RecordDeletedPhi(int merge_index) {
 deleted_phis_.Add(merge_index, zone());
 }
+ const ZoneList<HValue*>* defined_consts() const {
+ return &defined_consts_;
+ }
+ void RecordConst(HValue* constant) {
+ defined_consts_.Add(constant, zone()); | |
+ } | |
HBasicBlock* dominator() const { return dominator_; } | |
+ llvm::BasicBlock* llvm_start_basic_block() const { | |
+ return llvm_start_basic_block_; | |
+ } | |
+ void set_llvm_start_basic_block(llvm::BasicBlock* block) { | |
+ DCHECK(!llvm_end_basic_block_); | |
+ llvm_start_basic_block_ = block; | |
+ llvm_end_basic_block_ = block; | |
+ } | |
+ llvm::BasicBlock* llvm_end_basic_block() const { | |
+ return llvm_end_basic_block_; | |
+ } | |
+ void set_llvm_end_basic_block(llvm::BasicBlock* block) { | |
+ llvm_end_basic_block_ = block; | |
+ } | |
HEnvironment* last_environment() const { return last_environment_; } | |
int argument_count() const { return argument_count_; } | |
void set_argument_count(int count) { argument_count_ = count; } | |
@@ -156,6 +180,16 @@ class HBasicBlock final : public ZoneObject { | |
inline Zone* zone() const; | |
+ // DFS by reverse edges. | |
+ static void ReverseDFS(HBasicBlock* v, GrowableBitVector& visited, | |
+ Zone* zone); | |
+ | |
+ bool IsReacheableFrom(HBasicBlock* other) { | |
+ GrowableBitVector visited; | |
+ ReverseDFS(this, visited, zone()); | |
+ return visited.Contains(other->block_id()); | |
+ } | |
+ | |
#ifdef DEBUG | |
void Verify(); | |
#endif | |
@@ -198,9 +232,12 @@ class HBasicBlock final : public ZoneObject { | |
int first_instruction_index_; | |
int last_instruction_index_; | |
ZoneList<int> deleted_phis_; | |
+ ZoneList<HValue*> defined_consts_; | |
HBasicBlock* parent_loop_header_; | |
// For blocks marked as inline return target: the block with HEnterInlined. | |
HBasicBlock* inlined_entry_block_; | |
+ llvm::BasicBlock* llvm_start_basic_block_; | |
+ llvm::BasicBlock* llvm_end_basic_block_; | |
bool is_inline_return_target_ : 1; | |
bool is_reachable_ : 1; | |
bool dominates_loop_successors_ : 1; | |
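For illustration, here is a minimal standalone sketch of the reverse-DFS reachability check the patch adds above (plain STL stands in for V8's GrowableBitVector and Zone; the Block type and all names are illustrative, not part of the patch):

#include <vector>

// A toy CFG node; the patch works on HBasicBlock instead.
struct Block {
  int id;
  std::vector<Block*> predecessors;
};

// Mirrors HBasicBlock::ReverseDFS: mark every block from which `v`
// can be reached by walking predecessor edges backwards.
static void ReverseDFS(Block* v, std::vector<bool>& visited) {
  visited[v->id] = true;
  for (Block* pred : v->predecessors) {
    if (!visited[pred->id]) ReverseDFS(pred, visited);
  }
}

// Mirrors HBasicBlock::IsReacheableFrom (spelling as in the patch):
// `other` reaches `target` iff `other` gets visited by a reverse DFS
// started at `target`.
static bool IsReachableFrom(Block* target, Block* other, int block_count) {
  std::vector<bool> visited(block_count, false);
  ReverseDFS(target, visited);
  return visited[other->id];
}

int main() {
  Block a{0, {}}, b{1, {&a}};                 // edge a -> b
  return IsReachableFrom(&b, &a, 2) ? 0 : 1;  // a reaches b
}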
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc | |
index 262e16b..669c4bb 100644 | |
--- a/src/ia32/lithium-ia32.cc | |
+++ b/src/ia32/lithium-ia32.cc | |
@@ -451,12 +451,12 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) { | |
LPlatformChunk* LChunkBuilder::Build() { | |
DCHECK(is_unused()); | |
chunk_ = new(zone()) LPlatformChunk(info(), graph()); | |
- LPhase phase("L_Building chunk", chunk_); | |
+ LPhase phase("L_Building chunk", chunk()); | |
status_ = BUILDING; | |
// Reserve the first spill slot for the state of dynamic alignment. | |
if (info()->IsOptimizing()) { | |
- int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ int alignment_state_index = chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
DCHECK_EQ(alignment_state_index, 0); | |
USE(alignment_state_index); | |
} | |
@@ -465,7 +465,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
// which will be subsumed into this frame. | |
if (graph()->has_osr()) { | |
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { | |
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
} | |
} | |
@@ -477,7 +477,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
if (is_aborted()) return NULL; | |
} | |
status_ = DONE; | |
- return chunk_; | |
+ return chunk(); | |
} | |
@@ -537,14 +537,14 @@ static inline bool CanBeImmediateConstant(HValue* value) { | |
LOperand* LChunkBuilder::UseOrConstant(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value); | |
} | |
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseAtStart(value); | |
} | |
@@ -552,33 +552,33 @@ LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value, | |
Register fixed_register) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseFixed(value, fixed_register); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegister(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegisterAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseConstant(HValue* value) { | |
- return chunk_->DefineConstantOperand(HConstant::cast(value)); | |
+ return chunk()->DefineConstantOperand(HConstant::cast(value)); | |
} | |
LOperand* LChunkBuilder::UseAny(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); | |
} | |
@@ -742,7 +742,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, | |
bool does_deopt = false; | |
if (right_value->IsConstant()) { | |
HConstant* constant = HConstant::cast(right_value); | |
- right = chunk_->DefineConstantOperand(constant); | |
+ right = chunk()->DefineConstantOperand(constant); | |
constant_value = constant->Integer32Value() & 0x1f; | |
// Left shifts can deoptimize if we shift by > 0 and the result cannot be | |
// truncated to smi. | |
@@ -851,7 +851,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
argument_count_ = pred->argument_count(); | |
} | |
HInstruction* current = block->first(); | |
- int start = chunk_->instructions()->length(); | |
+ int start = chunk()->instructions()->length(); | |
while (current != NULL && !is_aborted()) { | |
// Code for constants in registers is generated lazily. | |
if (!current->EmitAtUses()) { | |
@@ -859,7 +859,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
} | |
current = current->next(); | |
} | |
- int end = chunk_->instructions()->length() - 1; | |
+ int end = chunk()->instructions()->length() - 1; | |
if (end >= start) { | |
block->set_first_instruction_index(start); | |
block->set_last_instruction_index(end); | |
@@ -888,7 +888,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { | |
LInstruction* dummy = | |
new(zone()) LDummyUse(UseAny(current->OperandAt(i))); | |
dummy->set_hydrogen_value(current); | |
- chunk_->AddInstruction(dummy, current_block_); | |
+ chunk()->AddInstruction(dummy, current_block_); | |
} | |
} else { | |
HBasicBlock* successor; | |
@@ -954,7 +954,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
if (FLAG_stress_environments && !instr->HasEnvironment()) { | |
instr = AssignEnvironment(instr); | |
} | |
- chunk_->AddInstruction(instr, current_block_); | |
+ chunk()->AddInstruction(instr, current_block_); | |
if (instr->IsCall() || instr->IsPrologue()) { | |
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; | |
diff --git a/src/lithium-codegen.cc b/src/lithium-codegen.cc | |
index 267df58..43fbe6b 100644 | |
--- a/src/lithium-codegen.cc | |
+++ b/src/lithium-codegen.cc | |
@@ -38,17 +38,11 @@ namespace v8 { | |
namespace internal { | |
-HGraph* LCodeGenBase::graph() const { | |
- return chunk()->graph(); | |
-} | |
- | |
- | |
-LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler, | |
+LCodeGenBase::LCodeGenBase(LChunk* chunk, | |
+ MacroAssembler* assembler, | |
CompilationInfo* info) | |
- : chunk_(static_cast<LPlatformChunk*>(chunk)), | |
+ : LowCodeGenBase(chunk, info), | |
masm_(assembler), | |
- info_(info), | |
- zone_(info->zone()), | |
status_(UNUSED), | |
current_block_(-1), | |
current_instruction_(-1), | |
@@ -56,6 +50,9 @@ LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler, | |
deoptimization_literals_(8, info->zone()), | |
last_lazy_deopt_pc_(0) {} | |
+LPlatformChunk* LCodeGenBase::chunk() const { | |
+ return static_cast<LPlatformChunk*>(chunk_); | |
+} | |
bool LCodeGenBase::GenerateBody() { | |
DCHECK(is_generating()); | |
@@ -157,7 +154,7 @@ void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) { | |
int LCodeGenBase::GetNextEmittedBlock() const { | |
for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { | |
if (!graph()->blocks()->at(i)->IsReachable()) continue; | |
- if (!chunk_->GetLabel(i)->HasReplacement()) return i; | |
+ if (!chunk()->GetLabel(i)->HasReplacement()) return i; | |
} | |
return -1; | |
} | |
@@ -177,13 +174,13 @@ void LCodeGenBase::Retry(BailoutReason reason) { | |
void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) { | |
if (map->is_deprecated()) return Retry(kMapBecameDeprecated); | |
- chunk_->AddDeprecationDependency(map); | |
+ chunk()->AddDeprecationDependency(map); | |
} | |
void LCodeGenBase::AddStabilityDependency(Handle<Map> map) { | |
if (!map->is_stable()) return Retry(kMapBecameUnstable); | |
- chunk_->AddStabilityDependency(map); | |
+ chunk()->AddStabilityDependency(map); | |
} | |
diff --git a/src/lithium-codegen.h b/src/lithium-codegen.h | |
index c654ff7..76117a1 100644 | |
--- a/src/lithium-codegen.h | |
+++ b/src/lithium-codegen.h | |
@@ -7,6 +7,7 @@ | |
#include "src/bailout-reason.h" | |
#include "src/compiler.h" | |
+#include "src/low-chunk.h" | |
#include "src/deoptimizer.h" | |
namespace v8 { | |
@@ -16,22 +17,16 @@ class LEnvironment; | |
class LInstruction; | |
class LPlatformChunk; | |
-class LCodeGenBase BASE_EMBEDDED { | |
+class LCodeGenBase : public LowCodeGenBase { | |
public: | |
LCodeGenBase(LChunk* chunk, | |
MacroAssembler* assembler, | |
CompilationInfo* info); | |
- virtual ~LCodeGenBase() {} | |
+ ~LCodeGenBase() override {} | |
// Simple accessors. | |
MacroAssembler* masm() const { return masm_; } | |
- CompilationInfo* info() const { return info_; } | |
- Isolate* isolate() const { return info_->isolate(); } | |
- Factory* factory() const { return isolate()->factory(); } | |
- Heap* heap() const { return isolate()->heap(); } | |
- Zone* zone() const { return zone_; } | |
- LPlatformChunk* chunk() const { return chunk_; } | |
- HGraph* graph() const; | |
+ LPlatformChunk* chunk() const; // shadows base chunk() | |
void FPRINTF_CHECKING Comment(const char* format, ...); | |
void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info); | |
@@ -66,11 +61,9 @@ class LCodeGenBase BASE_EMBEDDED { | |
ABORTED | |
}; | |
- LPlatformChunk* const chunk_; | |
MacroAssembler* const masm_; | |
- CompilationInfo* const info_; | |
- Zone* zone_; | |
- Status status_; | |
+ | |
+ Status status_; // TODO(llvm) consider pulling up this field and the enum | |
int current_block_; | |
int current_instruction_; | |
const ZoneList<LInstruction*>* instructions_; | |
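The chunk_ -> chunk() churn in the files above exists because the chunk pointer moved into the new LowCodeGenBase/LowChunkBuilderBase base classes (src/low-chunk.h) with the generic LowChunk type, while platform code still wants LPlatformChunk. A minimal sketch of the pattern, with assumed class shapes (the real ones live in src/low-chunk.h, which this gist introduces):

// Assumed shapes, for illustration only.
class LowChunk {};
class LPlatformChunk : public LowChunk {};

class LowCodeGenBase {
 protected:
  LowChunk* chunk_ = nullptr;  // The base now stores the generic pointer.
};

class LCodeGenBase : public LowCodeGenBase {
 public:
  // Shadows the base accessor: call sites use chunk() instead of the raw
  // chunk_ field so they keep seeing the platform-specific type.
  LPlatformChunk* chunk() const {
    return static_cast<LPlatformChunk*>(chunk_);
  }
};

int main() {
  LCodeGenBase gen;
  return gen.chunk() == nullptr ? 0 : 1;  // chunk_ starts out null here
}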
diff --git a/src/lithium.cc b/src/lithium.cc | |
index bc48a0a..8586d3b 100644 | |
--- a/src/lithium.cc | |
+++ b/src/lithium.cc | |
@@ -261,18 +261,16 @@ int StackSlotOffset(int index) { | |
LChunk::LChunk(CompilationInfo* info, HGraph* graph) | |
- : spill_slot_count_(0), | |
- info_(info), | |
- graph_(graph), | |
+ : LowChunk(info, graph), | |
+ spill_slot_count_(0), | |
instructions_(32, info->zone()), | |
pointer_maps_(8, info->zone()), | |
inlined_functions_(1, info->zone()), | |
deprecation_dependencies_(32, info->zone()), | |
stability_dependencies_(8, info->zone()) {} | |
- | |
LLabel* LChunk::GetLabel(int block_id) const { | |
- HBasicBlock* block = graph_->blocks()->at(block_id); | |
+ HBasicBlock* block = graph()->blocks()->at(block_id); | |
int first_instruction = block->first_instruction_index(); | |
return LLabel::cast(instructions_[first_instruction]); | |
} | |
@@ -396,13 +394,13 @@ void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) { | |
HConstant* LChunk::LookupConstant(LConstantOperand* operand) const { | |
- return HConstant::cast(graph_->LookupValue(operand->index())); | |
+ return HConstant::cast(graph()->LookupValue(operand->index())); | |
} | |
Representation LChunk::LookupLiteralRepresentation( | |
LConstantOperand* operand) const { | |
- return graph_->LookupValue(operand->index())->representation(); | |
+ return graph()->LookupValue(operand->index())->representation(); | |
} | |
@@ -470,7 +468,7 @@ void LChunk::CommitDependencies(Handle<Code> code) const { | |
Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code); | |
} | |
- info_->dependencies()->Commit(code); | |
+ info()->dependencies()->Commit(code); | |
RegisterWeakObjectsInOptimizedCode(code); | |
} | |
@@ -552,19 +550,10 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) { | |
} | |
} | |
- | |
-void LChunkBuilderBase::Abort(BailoutReason reason) { | |
- info()->AbortOptimization(reason); | |
- status_ = ABORTED; | |
-} | |
- | |
- | |
-void LChunkBuilderBase::Retry(BailoutReason reason) { | |
- info()->RetryOptimization(reason); | |
- status_ = ABORTED; | |
+LPlatformChunk* LChunkBuilderBase::chunk() const { | |
+ return static_cast<LPlatformChunk*>(chunk_); | |
} | |
- | |
LEnvironment* LChunkBuilderBase::CreateEnvironment( | |
HEnvironment* hydrogen_env, int* argument_index_accumulator, | |
ZoneList<HValue*>* objects_to_materialize) { | |
diff --git a/src/lithium.h b/src/lithium.h | |
index 97c5c1d..a1a10e2 100644 | |
--- a/src/lithium.h | |
+++ b/src/lithium.h | |
@@ -10,6 +10,7 @@ | |
#include "src/allocation.h" | |
#include "src/bailout-reason.h" | |
#include "src/hydrogen.h" | |
+#include "src/low-chunk.h" | |
#include "src/safepoint-table.h" | |
#include "src/zone-allocator.h" | |
@@ -627,8 +628,12 @@ class LLabel; | |
// Superclass providing data and behavior common to all the | |
// arch-specific LPlatformChunk classes. | |
-class LChunk : public ZoneObject { | |
+class LChunk : public LowChunk { | |
public: | |
+ // FIXME(llvm): shall we add actual code for the destructor, | |
+ // or does the Zone take care of everything? | |
+ // FIXME(llvm): should it be public? | |
+ ~LChunk() override {} | |
static LChunk* NewChunk(HGraph* graph); | |
void AddInstruction(LInstruction* instruction, HBasicBlock* block); | |
@@ -639,9 +644,6 @@ class LChunk : public ZoneObject { | |
int ParameterAt(int index); | |
int GetParameterStackSlot(int index) const; | |
int spill_slot_count() const { return spill_slot_count_; } | |
- CompilationInfo* info() const { return info_; } | |
- HGraph* graph() const { return graph_; } | |
- Isolate* isolate() const { return graph_->isolate(); } | |
const ZoneList<LInstruction*>* instructions() const { return &instructions_; } | |
void AddGapMove(int index, LOperand* from, LOperand* to); | |
LGap* GetGapAt(int index) const; | |
@@ -664,20 +666,18 @@ class LChunk : public ZoneObject { | |
void AddDeprecationDependency(Handle<Map> map) { | |
DCHECK(!map->is_deprecated()); | |
if (!map->CanBeDeprecated()) return; | |
- DCHECK(!info_->IsStub()); | |
+ DCHECK(!info()->IsStub()); | |
deprecation_dependencies_.Add(map, zone()); | |
} | |
void AddStabilityDependency(Handle<Map> map) { | |
DCHECK(map->is_stable()); | |
if (!map->CanTransition()) return; | |
- DCHECK(!info_->IsStub()); | |
+ DCHECK(!info()->IsStub()); | |
stability_dependencies_.Add(map, zone()); | |
} | |
- Zone* zone() const { return info_->zone(); } | |
- | |
- Handle<Code> Codegen(); | |
+ Handle<Code> Codegen() override; | |
void set_allocated_double_registers(BitVector* allocated_registers); | |
BitVector* allocated_double_registers() { | |
@@ -693,8 +693,6 @@ class LChunk : public ZoneObject { | |
void RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const; | |
void CommitDependencies(Handle<Code> code) const; | |
- CompilationInfo* info_; | |
- HGraph* const graph_; | |
BitVector* allocated_double_registers_; | |
ZoneList<LInstruction*> instructions_; | |
ZoneList<LPointerMap*> pointer_maps_; | |
@@ -704,36 +702,15 @@ class LChunk : public ZoneObject { | |
}; | |
-class LChunkBuilderBase BASE_EMBEDDED { | |
+class LChunkBuilderBase : public LowChunkBuilderBase { | |
public: | |
explicit LChunkBuilderBase(CompilationInfo* info, HGraph* graph) | |
- : argument_count_(0), | |
- chunk_(NULL), | |
- info_(info), | |
- graph_(graph), | |
- status_(UNUSED), | |
- zone_(graph->zone()) {} | |
- | |
- virtual ~LChunkBuilderBase() { } | |
+ : LowChunkBuilderBase(info, graph) {} | |
- void Abort(BailoutReason reason); | |
- void Retry(BailoutReason reason); | |
+ ~LChunkBuilderBase() override {} | |
protected: | |
- enum Status { UNUSED, BUILDING, DONE, ABORTED }; | |
- | |
- LPlatformChunk* chunk() const { return chunk_; } | |
- CompilationInfo* info() const { return info_; } | |
- HGraph* graph() const { return graph_; } | |
- int argument_count() const { return argument_count_; } | |
- Isolate* isolate() const { return graph_->isolate(); } | |
- Heap* heap() const { return isolate()->heap(); } | |
- | |
- bool is_unused() const { return status_ == UNUSED; } | |
- bool is_building() const { return status_ == BUILDING; } | |
- bool is_done() const { return status_ == DONE; } | |
- bool is_aborted() const { return status_ == ABORTED; } | |
- | |
+ LPlatformChunk* chunk() const; // FIXME(llvm): shadows the base chunk(). Should probably be virtual. | |
// An input operand in register, stack slot or a constant operand. | |
// Will not be moved to a register even if one is freely available. | |
virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0; | |
@@ -744,17 +721,6 @@ class LChunkBuilderBase BASE_EMBEDDED { | |
void AddObjectToMaterialize(HValue* value, | |
ZoneList<HValue*>* objects_to_materialize, | |
LEnvironment* result); | |
- | |
- Zone* zone() const { return zone_; } | |
- | |
- int argument_count_; | |
- LPlatformChunk* chunk_; | |
- CompilationInfo* info_; | |
- HGraph* const graph_; | |
- Status status_; | |
- | |
- private: | |
- Zone* zone_; | |
}; | |
diff --git a/src/llvm/llvm-chunk.cc b/src/llvm/llvm-chunk.cc | |
new file mode 100644 | |
index 0000000..b91a4b9 | |
--- /dev/null | |
+++ b/src/llvm/llvm-chunk.cc | |
@@ -0,0 +1,7162 @@ | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#include <cstdio> | |
+#include <iomanip> | |
+#include <set> | |
+ | |
+#include "src/code-factory.h" | |
+#include "src/disassembler.h" | |
+#include "src/hydrogen-osr.h" | |
+#include "src/ic/ic.h" | |
+#include "src/safepoint-table.h" | |
+#include "llvm-chunk.h" | |
+#include "pass-normalize-phis.h" | |
+#include "src/profiler/cpu-profiler.h" | |
+#include <llvm/IR/InlineAsm.h> | |
+#include "llvm-stackmaps.h" | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+#define __ llvm_ir_builder_-> | |
+ | |
+auto LLVMGranularity::x64_target_triple = "x86_64-unknown-linux-gnu"; | |
+const char* LLVMChunkBuilder::kGcStrategyName = "v8-gc"; | |
+const std::string LLVMChunkBuilder::kPointersPrefix = "pointer_"; | |
+llvm::Type* Types::i8 = nullptr; | |
+llvm::Type* Types::i16 = nullptr; | |
+llvm::Type* Types::i32 = nullptr; | |
+llvm::Type* Types::i64 = nullptr; | |
+llvm::Type* Types::float32 = nullptr; | |
+llvm::Type* Types::float64 = nullptr; | |
+llvm::PointerType* Types::ptr_i8 = nullptr; | |
+llvm::PointerType* Types::ptr_i16 = nullptr; | |
+llvm::PointerType* Types::ptr_i32 = nullptr; | |
+llvm::PointerType* Types::ptr_i64 = nullptr; | |
+llvm::PointerType* Types::ptr_float32 = nullptr; | |
+llvm::PointerType* Types::ptr_float64 = nullptr; | |
+llvm::Type* Types::tagged = nullptr; | |
+llvm::PointerType* Types::ptr_tagged = nullptr; | |
+llvm::Type* Types::smi = nullptr; | |
+llvm::Type* Types::ptr_smi = nullptr; | |
+ | |
+LLVMChunk::~LLVMChunk() {} | |
+ | |
+static void DumpSafepoints(Code* code) { | |
+ SafepointTable table(code); | |
+ std::cerr << "Safepoints (size = " << table.size() << ")\n"; | |
+ for (unsigned i = 0; i < table.length(); i++) { | |
+ unsigned pc_offset = table.GetPcOffset(i); | |
+ std::cerr << static_cast<const void*>(code->instruction_start() + pc_offset) << " "; | |
+ std::cerr << std::setw(4) << pc_offset << " "; | |
+ table.PrintEntry(i, std::cerr); | |
+ std::cerr << " (sp -> fp) "; | |
+ SafepointEntry entry = table.GetEntry(i); | |
+ if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) { | |
+ std::cerr << std::setw(6) << entry.deoptimization_index(); | |
+ } else { | |
+ std::cerr << "<none>"; | |
+ } | |
+ if (entry.argument_count() > 0) { | |
+ std::cerr << " argc: " << entry.argument_count(); | |
+ } | |
+ std::cerr << "\n"; | |
+ } | |
+ std::cerr << "\n"; | |
+} | |
+ | |
+Handle<Code> LLVMChunk::Codegen() { | |
+ uint64_t address = LLVMGranularity::getInstance().GetFunctionAddress( | |
+ llvm_function_id_); | |
+ auto buf = LLVMGranularity::getInstance().memory_manager_ref() | |
+ ->LastAllocatedCode().buffer; | |
+ USE(buf); | |
+#ifdef DEBUG | |
+ std::cerr << "\taddress == " << reinterpret_cast<void*>(address) << std::endl; | |
+ std::cerr << "\tlast allocated code section start == " | |
+ << static_cast<void*>(buf) << std::endl; | |
+ // FIXME(llvm): | |
+ // The right thing is address. But for now it's harder to get. So there. | |
+ if (reinterpret_cast<void*>(address) != static_cast<void*>(buf)) | |
+ UNIMPLEMENTED(); | |
+ LLVMGranularity::getInstance().Err(); | |
+#else | |
+ USE(address); | |
+#endif | |
+ | |
+ Isolate* isolate = info()->isolate(); | |
+ CodeDesc& code_desc = | |
+ LLVMGranularity::getInstance().memory_manager_ref()->LastAllocatedCode(); | |
+ | |
+ // This is of course totally untrue. | |
+ code_desc.origin = &masm_; | |
+ | |
+#ifdef DEBUG | |
+ LLVMGranularity::getInstance().Disass( | |
+ code_desc.buffer, code_desc.buffer + code_desc.instr_size); | |
+#endif | |
+ | |
+ StackMaps stackmaps = GetStackMaps(); | |
+ | |
+ // It is important that this call goes before EmitSafepointTable() | |
+ // because it patches nop sequences to calls (and EmitSafepointTable | |
+ // looks for calls in the instruction stream to determine their sizes). | |
+ std::vector<RelocInfo> reloc_info_from_patchpoints = | |
+ SetUpRelativeCalls(buf, stackmaps); | |
+ | |
+ // This assembler owns its buffer (it contains our SafepointTable). | |
+ // FIXME(llvm): the assembler shouldn't care about kGap in our case... | |
+ auto initial_buffer_size = Max(code_desc.buffer_size / 6, 32); | |
+ Assembler assembler(isolate, nullptr, initial_buffer_size); | |
+ EmitSafepointTable(&assembler, stackmaps, buf); | |
+ CodeDesc safepoint_table_desc; | |
+ assembler.GetCode(&safepoint_table_desc); | |
+ | |
+ Vector<byte> reloc_bytevector = GetFullRelocationInfo( | |
+ code_desc, reloc_info_from_patchpoints); | |
+ | |
+ // Allocate and install the code. | |
+ if (info()->IsStub()) UNIMPLEMENTED(); // Probably different flags for stubs. | |
+ Code::Flags flags = Code::ComputeFlags(info()->output_code_kind()); | |
+ Handle<Code> code = isolate->factory()->NewLLVMCode( | |
+ code_desc, safepoint_table_desc, &reloc_bytevector, flags); | |
+ isolate->counters()->total_compiled_code_size()->Increment( | |
+ code->instruction_size()); | |
+ | |
+ SetUpDeoptimizationData(code, stackmaps); | |
+ | |
+ // TODO(llvm): it is not thread-safe. It's not anything-safe. | |
+ // We assume a new function gets attention after the previous one | |
+ // has been fully processed by llv8. | |
+ LLVMGranularity::getInstance().memory_manager_ref()->DropStackmaps(); | |
+#ifdef DEBUG | |
+ std::cerr << "Instruction start: " | |
+ << reinterpret_cast<void*>(code->instruction_start()) << std::endl; | |
+#endif | |
+ | |
+#ifdef DEBUG | |
+ LLVMGranularity::getInstance().Disass( | |
+ code->instruction_start(), | |
+ code->instruction_start() + code->instruction_size()); | |
+ | |
+ std::cerr << "\nRelocInfo (size = " << code->relocation_size() << ")\n"; | |
+ for (RelocIterator it(*code.location()); !it.done(); it.next()) { | |
+ it.rinfo()->Print(isolate, std::cerr); | |
+ } | |
+ std::cerr << "\n"; | |
+ | |
+ DumpSafepoints(*code); | |
+#else | |
+ USE(DumpSafepoints); | |
+#endif | |
+ return code; | |
+} | |
+ | |
+ | |
+void LLVMChunk::WriteTranslation(LLVMEnvironment* environment, | |
+ Translation* translation, | |
+ const StackMaps& stackmaps, | |
+ int32_t patchpoint_id, | |
+ int start_index) { | |
+ if (!environment) return; | |
+ | |
+ if (environment->outer()) { | |
+ WriteTranslation(environment->outer(), translation, stackmaps, patchpoint_id, | |
+ start_index - environment->outer()->translation_size()); | |
+ } | |
+ | |
+ // TODO(llvm): this isn't very good performance-wise. Esp. considering | |
+ // the result of this call is the same across recursive invocations. | |
+ auto stackmap_record = stackmaps.computeRecordMap()[patchpoint_id]; | |
+ | |
+ // The translation includes one command per value in the environment. | |
+ int translation_size = environment->translation_size(); | |
+ // The output frame height does not include the parameters. | |
+ int height = translation_size - environment->parameter_count(); | |
+ | |
+ int shared_id = deopt_data_->DefineDeoptimizationLiteral( | |
+ environment->entry() ? environment->entry()->shared() | |
+ : info()->shared_info()); | |
+ int closure_id = deopt_data_->DefineDeoptimizationLiteral( | |
+ environment->closure()); | |
+ // WriteTranslationFrame | |
+ switch (environment->frame_type()) { | |
+ case JS_FUNCTION: | |
+ translation->BeginJSFrame(environment->ast_id(), shared_id, height); | |
+ if (info()->closure().is_identical_to(environment->closure())) { | |
+ translation->StoreJSFrameFunction(); | |
+ } else { | |
+ translation->StoreLiteral(closure_id); | |
+ } | |
+ break; | |
+ case JS_CONSTRUCT: | |
+ translation->BeginConstructStubFrame(shared_id, translation_size); | |
+ if (info()->closure().is_identical_to(environment->closure())) { | |
+ translation->StoreJSFrameFunction(); | |
+ } else { | |
+ translation->StoreLiteral(closure_id); | |
+ } | |
+ break; | |
+ case JS_GETTER: | |
+ UNIMPLEMENTED(); | |
+ break; | |
+ case JS_SETTER: | |
+ UNIMPLEMENTED(); | |
+ break; | |
+ case ARGUMENTS_ADAPTOR: | |
+ translation->BeginArgumentsAdaptorFrame(shared_id, translation_size); | |
+ if (info()->closure().is_identical_to(environment->closure())) { | |
+ translation->StoreJSFrameFunction(); | |
+ } else { | |
+ translation->StoreLiteral(closure_id); | |
+ } | |
+ break; | |
+ case STUB: | |
+ UNIMPLEMENTED(); | |
+ break; | |
+ default: | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ int object_index = 0; | |
+ int dematerialized_index = 0; | |
+ | |
+ DCHECK_LE(start_index + translation_size - 1, | |
+ stackmap_record.locations.size()); | |
+ for (int i = 0; i < translation_size; ++i) { | |
+ // i-th Location in a stackmap record Location corresponds to the i-th | |
+ // llvm::Value*. Here's an excerpt from the doc: | |
+ // > The runtime must be able to interpret the stack map record given only | |
+ // > the ID, offset, and the order of the locations, *which LLVM preserves*. | |
+ | |
+ // FIXME(llvm): It seems we cannot really use llvm::Value* here, because | |
+ // since we generated them optimization has happened | |
+ // (therefore those values are now invalid). | |
+ | |
+ llvm::Value* value = environment->values()->at(i); | |
+ StackMaps::Location location = stackmap_record.locations[i + start_index]; | |
+ AddToTranslation(environment, | |
+ translation, | |
+ value, | |
+ location, | |
+ stackmaps.constants, | |
+ environment->HasTaggedValueAt(i), | |
+ environment->HasUint32ValueAt(i), | |
+ environment->HasDoubleValueAt(i), | |
+ &object_index, | |
+ &dematerialized_index); | |
+ } | |
+} | |
+ | |
+// As far as I understand, index is a CallerPC-relative offset, | |
+// i.e. relative to the stack cell holding the ret address. | |
+static int FpRelativeOffsetToIndex(int32_t offset) { | |
+ // ........................ | |
+ // index fp-relative offset (decimal) | |
+ // -1 | arg N (the last) | +16 | |
+ // 0 | RET | +8 | |
+ // 1 | saved FP | 0 | |
+ // 2 | saved context | -8 | |
+ // ........................ | |
+ | |
+ DCHECK(offset % kInt32Size == 0); | |
+ if (FLAG_enable_embedded_constant_pool) // This would change the pic above. | |
+ UNIMPLEMENTED(); | |
+ auto index = -offset / kPointerSize + 1; | |
+ return index; | |
+} | |
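A quick sanity check of the mapping above (assuming x64 with kPointerSize == 8; the helper name is illustrative), matching the table in the comment:

#include <cassert>

static int FpRelativeOffsetToIndexSketch(int offset) {
  return -offset / 8 + 1;  // same formula as above, with kPointerSize == 8
}

int main() {
  assert(FpRelativeOffsetToIndexSketch(16) == -1);  // last argument
  assert(FpRelativeOffsetToIndexSketch(8) == 0);    // return address
  assert(FpRelativeOffsetToIndexSketch(0) == 1);    // saved FP
  assert(FpRelativeOffsetToIndexSketch(-8) == 2);   // saved context
}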
+ | |
+void LLVMChunk::AddToTranslation( | |
+ LLVMEnvironment* environment, // TODO(llvm): unused? | |
+ Translation* translation, | |
+ llvm::Value* op, | |
+ StackMaps::Location& location, | |
+ const std::vector<StackMaps::Constant> constants, | |
+ bool is_tagged, | |
+ bool is_uint32, | |
+ bool is_double, | |
+ int* object_index_pointer, | |
+ int* dematerialized_index_pointer) { | |
+ | |
+ if (op == LLVMEnvironment::materialization_marker()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ // TODO(llvm): What about StoreDouble..()? | |
+ // It's an unimplemented case which might be hidden | |
+ if (location.kind == StackMaps::Location::kDirect) { | |
+ UNIMPLEMENTED(); | |
+ } else if (location.kind == StackMaps::Location::kIndirect) { | |
+ Register reg = location.dwarf_reg.reg().IntReg(); | |
+ if (!reg.is(rbp)) UNIMPLEMENTED(); | |
+ auto index = FpRelativeOffsetToIndex(location.offset); | |
+ if (is_tagged) { | |
+ DCHECK(location.size == kPointerSize); | |
+ translation->StoreStackSlot(index); | |
+ } else if (is_uint32) { | |
+ DCHECK(location.size == kInt32Size); | |
+ translation->StoreUint32StackSlot(index); | |
+ } else { | |
+ if (is_double) { | |
+ DCHECK(location.size == kDoubleSize); | |
+ translation->StoreDoubleStackSlot(index); | |
+ } else { | |
+ DCHECK(location.size == kInt32Size); | |
+ translation->StoreInt32StackSlot(index); | |
+ } | |
+ } | |
+ } else if (location.kind == StackMaps::Location::kRegister) { | |
+ StackMapReg stack_reg = location.dwarf_reg.reg(); | |
+ if (stack_reg.IsIntReg()) { | |
+ Register reg = stack_reg.IntReg(); | |
+ if (is_tagged) { | |
+ translation->StoreRegister(reg); | |
+ } else if (is_uint32) { | |
+ translation->StoreUint32Register(reg); | |
+ } else { | |
+ translation->StoreInt32Register(reg); | |
+ } | |
+ } else if (stack_reg.IsDoubleReg()) { | |
+ XMMRegister reg = stack_reg.XMMReg(); | |
+ translation->StoreDoubleRegister(reg); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } else if (location.kind == StackMaps::Location::kConstantIndex) { | |
+ // FIXME(llvm): We assume large constant is a heap object address | |
+ // this block has not really been thoroughly tested | |
+ auto value = constants[location.offset].integer; | |
+ | |
+ if ((value & kSmiTagMask) == 0) { // A smi. | |
+ auto int_val = IntHelper::AsInt32(value >> (kSmiTagSize + kSmiShiftSize)); | |
+ // FIXME(llvm): is it an OK way to create a handle for a Smi? | |
+ int literal_id = deopt_data_->DefineDeoptimizationLiteral( | |
+ isolate()->factory()->NewNumberFromInt(int_val, TENURED)); | |
+ translation->StoreLiteral(literal_id); | |
+ } else { | |
+ if (reloc_data_->reloc_map().count(value)) { | |
+ auto pair = reloc_data_->reloc_map()[value]; | |
+ LLVMRelocationData::ExtendedInfo minfo = pair.second; | |
+ if (minfo.cell_extended) { | |
+ DCHECK((value & 0xffffffff) == LLVMChunkBuilder::kExtFillingValue); | |
+ value >>= 32; | |
+ } | |
+ } | |
+ Handle<Object> const_obj = bit_cast<Handle<HeapObject> >( | |
+ static_cast<intptr_t>(value)); | |
+ int literal_id = deopt_data_->DefineDeoptimizationLiteral(const_obj); | |
+ translation->StoreLiteral(literal_id); | |
+ } | |
+ } else if (location.kind == StackMaps::Location::kConstant) { | |
+ int literal_id = deopt_data_->DefineDeoptimizationLiteral( | |
+ isolate()->factory()->NewNumberFromInt(location.offset, TENURED)); | |
+ translation->StoreLiteral(literal_id); | |
+ } else { | |
+ UNREACHABLE(); | |
+ } | |
+} | |
+ | |
+int LLVMChunk::WriteTranslationFor(LLVMEnvironment* env, | |
+ const StackMaps& stackmaps) { | |
+ int frame_count = 0; | |
+ int jsframe_count = 0; | |
+ for (LLVMEnvironment* e = env; e != NULL; e = e->outer()) { | |
+ ++frame_count; | |
+ if (e->frame_type() == JS_FUNCTION) { | |
+ ++jsframe_count; | |
+ } | |
+ } | |
+ Translation translation(&deopt_data_->translations(), frame_count, | |
+ jsframe_count, zone()); | |
+ | |
+ // All of the LLVMEnvironments (closure of env by ->outer()) | |
+ // share the same associated patchpoint_id. | |
+ auto patchpoint_id = deopt_data_->GetPatchpointIdByEnvironment(env); | |
+ // But they have different start indices in the corresponding | |
+ // Stack Map record. Layout of the Stack Map record (order of Locations) | |
+ // is the same as that of the TranslationBuffer, i.e. the outermost frame first. | |
+ auto stackmap_record = stackmaps.computeRecordMap()[patchpoint_id]; | |
+ auto total_size = IntHelper::AsInt(stackmap_record.locations.size()); | |
+ auto start_index_inner = total_size - env->translation_size(); | |
+ WriteTranslation( | |
+ env, &translation, stackmaps, patchpoint_id, start_index_inner); | |
+ return translation.index(); | |
+} | |
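A worked example of the start-index bookkeeping (sizes are hypothetical): suppose one inlined call produces an outer frame with 5 values and an inner frame with 3, sharing a stackmap record with 8 locations laid out outermost first.

#include <cassert>

int main() {
  int total_size = 8;  // locations in the shared stackmap record
  int inner_size = 3;  // inner environment's translation_size()
  int outer_size = 5;  // outer environment's translation_size()
  int start_inner = total_size - inner_size;   // inner reads locations [5, 8)
  int start_outer = start_inner - outer_size;  // recursion passes 0: [0, 5)
  assert(start_inner == 5 && start_outer == 0);
}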
+ | |
+int LLVMDeoptData::DefineDeoptimizationLiteral(Handle<Object> literal) { | |
+ int result = deoptimization_literals_.length(); | |
+ for (int i = 0; i < deoptimization_literals_.length(); ++i) { | |
+ if (deoptimization_literals_[i].is_identical_to(literal)) return i; | |
+ } | |
+ deoptimization_literals_.Add(literal, zone_); | |
+ return result; | |
+} | |
+ | |
+void* LLVMDeoptData::GetKey(int32_t patchpoint_id) { | |
+ DCHECK(patchpoint_id >= 0); | |
+ auto new_int = new(zone_) int32_t; | |
+ *new_int = patchpoint_id; | |
+ return new_int; | |
+} | |
+ | |
+uint32_t LLVMDeoptData::GetHash(int32_t patchpoint_id) { | |
+ DCHECK(patchpoint_id >= 0); | |
+ return static_cast<uint32_t>(patchpoint_id); | |
+} | |
+ | |
+void LLVMDeoptData::Add(LLVMEnvironment* environment, int32_t patchpoint_id) { | |
+ auto key = GetKey(patchpoint_id); | |
+ auto hash = GetHash(patchpoint_id); | |
+ auto entry = deoptimizations_.LookupOrInsert(key, hash, | |
+ ZoneAllocationPolicy(zone_)); | |
+ entry->value = environment; | |
+ | |
+ reverse_deoptimizations_[environment] = patchpoint_id; | |
+} | |
+ | |
+LLVMEnvironment* LLVMDeoptData::GetEnvironmentByPatchpointId( | |
+ int32_t patchpoint_id) { | |
+ auto key = GetKey(patchpoint_id); | |
+ auto hash = GetHash(patchpoint_id); | |
+ auto entry = deoptimizations_.Lookup(key, hash); | |
+ return static_cast<LLVMEnvironment*>(entry->value); | |
+} | |
+ | |
+int32_t LLVMDeoptData::GetPatchpointIdByEnvironment(LLVMEnvironment* env) { | |
+ return reverse_deoptimizations_[env]; | |
+} | |
+ | |
+ | |
+std::vector<RelocInfo> LLVMChunk::SetUpRelativeCalls( | |
+ Address start, | |
+ const StackMaps& stackmaps) { | |
+ std::vector<RelocInfo> result; | |
+ | |
+ for (auto i = 0; i < stackmaps.records.size(); i++) { | |
+ auto record = stackmaps.records[i]; | |
+ auto id = record.patchpointID; | |
+ if (!reloc_data_->IsPatchpointIdReloc(id)) continue; | |
+ | |
+ auto pc_offset = start + record.instructionOffset; | |
+ *pc_offset++ = 0xE8; // Call relative offset. | |
+ | |
+ if (reloc_data_->IsPatchpointIdDeopt(id)) { | |
+ // Record relocatable runtime entry (deoptimization bailout target). | |
+ RelocInfo reloc_info(pc_offset, RelocInfo::RUNTIME_ENTRY, 0, nullptr); | |
+ result.push_back(reloc_info); | |
+ auto delta = deopt_target_offset_for_ppid_[id]; | |
+ Memory::int32_at(pc_offset) = IntHelper::AsInt32(delta); | |
+ | |
+ // Record deoptimization reason. | |
+ if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) { | |
+ RelocInfo deopt_reason(pc_offset - 1, RelocInfo::DEOPT_REASON, | |
+ reloc_data_->GetDeoptReason(id), nullptr); | |
+ result.push_back(deopt_reason); | |
+ } | |
+ } else { | |
+ if (reloc_data_->IsPatchpointIdRelocNop(id)) { | |
+ // TODO(llvm): it's always CODE_TARGET for now. | |
+ RelocInfo reloc_info(pc_offset, RelocInfo::CODE_TARGET, 0, nullptr); | |
+ result.push_back(reloc_info); | |
+ Memory::uint32_at(pc_offset) = target_index_for_ppid_[id]; | |
+ pc_offset = pc_offset + 4; | |
+ *pc_offset++ = Assembler::kNopByte; | |
+ } else { | |
+ DCHECK(reloc_data_->IsPatchpointIdReloc(id)); | |
+ // TODO(llvm): it's always CODE_TARGET for now. | |
+ RelocInfo reloc_info(pc_offset, RelocInfo::CODE_TARGET, 0, nullptr); | |
+ result.push_back(reloc_info); | |
+ Memory::uint32_at(pc_offset) = target_index_for_ppid_[id]; | |
+ } | |
+ } | |
+ } | |
+ | |
+ return result; | |
+} | |
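The patching above relies on the x86-64 encoding of `call rel32`: a one-byte 0xE8 opcode followed by a 32-bit displacement measured from the end of the 5-byte instruction. A sketch of the byte-level rewrite (helper name illustrative; the real code additionally records a RelocInfo at the displacement address so the target can be updated later):

#include <cstdint>
#include <cstring>

static void PatchCallAt(uint8_t* pc, int32_t rel32) {
  pc[0] = 0xE8;                    // call rel32 opcode
  std::memcpy(pc + 1, &rel32, 4);  // little-endian 32-bit displacement
}

int main() {
  uint8_t code[5] = {0x90, 0x90, 0x90, 0x90, 0x90};  // five nops
  PatchCallAt(code, 0x12345678);
  return code[0] == 0xE8 ? 0 : 1;
}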
+ | |
+StackMaps LLVMChunk::GetStackMaps() { | |
+ List<byte*>& stackmap_list = | |
+ LLVMGranularity::getInstance().memory_manager_ref()->stackmaps(); | |
+ | |
+ if (stackmap_list.length() == 0) { | |
+ StackMaps empty; | |
+ return empty; | |
+ } | |
+ | |
+ DCHECK(stackmap_list.length() == 1); | |
+ | |
+ StackMaps stackmaps; | |
+ DataView view(stackmap_list[0]); | |
+ stackmaps.parse(&view); | |
+#ifdef DEBUG | |
+ stackmaps.dumpMultiline(std::cerr, " "); | |
+#endif | |
+ | |
+ // Because we only have one function. This could change in the future. | |
+ DCHECK(stackmaps.stack_sizes.size() == 1); | |
+ | |
+ return stackmaps; | |
+} | |
+ | |
+void LLVMChunk::EmitSafepointTable(Assembler* assembler, | |
+ StackMaps& stackmaps, | |
+ Address instruction_start) { | |
+ SafepointTableBuilder safepoints_builder(zone()); | |
+ | |
+ // TODO(llvm): safepoints should probably be sorted by position in the code (!) | |
+ // As of today, the search @ SafepointTable::FindEntry is linear though. | |
+ | |
+ int safepoint_arguments = 0; | |
+ // TODO(llvm): There's also kWithRegisters. And with doubles... | |
+ Safepoint::Kind kind = Safepoint::kSimple; | |
+ Safepoint::DeoptMode deopt_mode = Safepoint::kLazyDeopt; | |
+ | |
+ for (auto stackmap_record : stackmaps.records) { | |
+ auto patchpoint_id = stackmap_record.patchpointID; | |
+ if (!reloc_data_->IsPatchpointIdSafepoint(patchpoint_id)) continue; | |
+ | |
+ auto num_passed_args = | |
+ reloc_data_->GetNumSafepointFuncionArgs(patchpoint_id); | |
+ unsigned pc_offset = stackmap_record.instructionOffset; | |
+ int call_instr_size = LLVMGranularity::getInstance().CallInstructionSizeAt( | |
+ instruction_start + pc_offset); | |
+ DCHECK_GT(call_instr_size, 0); | |
+ pc_offset += call_instr_size; | |
+ Safepoint safepoint = safepoints_builder.DefineSafepoint( | |
+ pc_offset, kind, safepoint_arguments, deopt_mode, num_passed_args); | |
+ | |
+ // First three locations are constants describing the calling convention, | |
+ // flags passed to the statepoint intrinsic and the number of following | |
+ // deopt Locations. | |
+ CHECK(stackmap_record.locations.size() >= 3); | |
+ | |
+ for (auto i = 3; i < stackmap_record.locations.size(); i++) { | |
+ auto location = stackmap_record.locations[i]; | |
+ // FIXME(llvm): LLVM bug (should be Indirect). See discussion here: | |
+ // http://lists.llvm.org/pipermail/llvm-dev/2015-November/092394.html | |
+ if (location.kind == StackMaps::Location::kDirect) { | |
+ Register reg = location.dwarf_reg.reg().IntReg(); | |
+ if (!reg.is(rbp)) UNIMPLEMENTED(); | |
+ DCHECK_LT(location.offset, 0); | |
+ DCHECK_EQ(location.size, kPointerSize); | |
+ auto index = -location.offset / kPointerSize; | |
+ // Safepoint table indices are 0-based from the beginning of the spill | |
+ // slot area, adjust appropriately. | |
+ index -= kPhonySpillCount; | |
+ safepoint.DefinePointerSlot(index, zone()); | |
+ } else if (location.kind == StackMaps::Location::kIndirect) { | |
+ UNIMPLEMENTED(); | |
+ } else if (location.kind == StackMaps::Location::kConstantIndex) { | |
+ // FIXME(llvm): why do we have these kinds of locations? | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } | |
+ } | |
+ | |
+ bool llvmed = true; | |
+ safepoints_builder.Emit(assembler, SpilledCount(stackmaps), llvmed); | |
+} | |
+ | |
+Vector<byte> LLVMChunk::GetFullRelocationInfo( | |
+ CodeDesc& code_desc, | |
+ const std::vector<RelocInfo>& reloc_data_from_patchpoints) { | |
+ // Relocation info comes from 2 sources: | |
+ // 1) reloc info already present in reloc_data_; | |
+ // 2) patchpoints (CODE_TARGET reloc info has to be extracted from them). | |
+ const std::vector<RelocInfo>& reloc_data_2 = reloc_data_from_patchpoints; | |
+ std::vector<RelocInfo> reloc_data_1 = LLVMGranularity::getInstance().Patch( | |
+ code_desc.buffer, code_desc.buffer + code_desc.instr_size, | |
+ reloc_data_->reloc_map()); | |
+ RelocInfoBuffer buffer_writer(8, code_desc.buffer); | |
+ // Merge reloc infos, sort all of them by pc_ and write to the buffer. | |
+ std::vector<RelocInfo> reloc_data_merged; | |
+ reloc_data_merged.insert(reloc_data_merged.end(), | |
+ reloc_data_1.begin(), reloc_data_1.end()); | |
+ reloc_data_merged.insert(reloc_data_merged.end(), | |
+ reloc_data_2.begin(), reloc_data_2.end()); | |
+ std::sort(reloc_data_merged.begin(), reloc_data_merged.end(), | |
+ [](const RelocInfo a, const RelocInfo b) { | |
+ return a.pc() < b.pc(); | |
+ }); | |
+ for (auto r : reloc_data_merged) buffer_writer.Write(&r); | |
+ v8::internal::Vector<byte> reloc_bytevector = buffer_writer.GetResult(); | |
+ // TODO(llvm): what's up with setting reloc_info's host_ to *code? | |
+ return reloc_bytevector; | |
+} | |
+ | |
+int LLVMChunk::SpilledCount(const StackMaps& stackmaps) { | |
+ int stack_size = IntHelper::AsInt(stackmaps.stackSize()); | |
+ DCHECK(stack_size / kStackSlotSize - kPhonySpillCount >= 0); | |
+ return stack_size / kStackSlotSize - kPhonySpillCount; | |
+} | |
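Worked example with hypothetical numbers: if stackmaps.stackSize() reports 88 bytes, kStackSlotSize is 8 and kPhonySpillCount is 3, the function reports 88 / 8 - 3 == 8 spill slots; the phony slots appear to cover fixed frame cells that precede the real spill area.

#include <cassert>

int main() {
  int stack_size = 88;               // hypothetical stackSize()
  int spilled = stack_size / 8 - 3;  // kStackSlotSize == 8, kPhonySpillCount == 3
  assert(spilled == 8);
}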
+ | |
+void LLVMChunk::SetUpDeoptimizationData(Handle<Code> code, | |
+ StackMaps& stackmaps) { | |
+ code->set_stack_slots(SpilledCount(stackmaps)); | |
+ | |
+ std::vector<uint32_t> sorted_ids; | |
+ int max_deopt = 0; | |
+ for (auto i = 0; i < stackmaps.records.size(); i++) { | |
+ auto id = stackmaps.records[i].patchpointID; | |
+ if (reloc_data_->IsPatchpointIdDeopt(id)) { | |
+ sorted_ids.push_back(id); | |
+ int bailout_id = reloc_data_->GetBailoutId(id); | |
+ if (bailout_id > max_deopt) max_deopt = bailout_id; | |
+ } | |
+ } | |
+ CHECK(max_deopt >= 0); | |
+ std::sort(sorted_ids.begin(), sorted_ids.end()); | |
+ size_t true_deopt_count = max_deopt + 1; | |
+ Handle<DeoptimizationInputData> data = | |
+ DeoptimizationInputData::New(isolate(), | |
+ IntHelper::AsInt(true_deopt_count), TENURED); | |
+ | |
+ // FIXME(llvm): This invariant fails when the optimizer duplicates a deopt branch. | |
+ CHECK_EQ(std::set<int>(sorted_ids.begin(), sorted_ids.end()).size(), | |
+ sorted_ids.size()); | |
+ if (true_deopt_count == 0) return; | |
+ | |
+ // It's important. It seems something expects deopt entries to be stored | |
+ // in the same order they were added. | |
+ for (auto deopt_entry_number = 0; | |
+ deopt_entry_number < sorted_ids.size(); | |
+ deopt_entry_number++) { | |
+ | |
+ auto stackmap_id = sorted_ids[deopt_entry_number]; | |
+ CHECK(reloc_data_->IsPatchpointIdDeopt(stackmap_id)); | |
+ int bailout_id = reloc_data_->GetBailoutId(stackmap_id); | |
+ | |
+ auto env = deopt_data_->GetEnvironmentByPatchpointId(stackmap_id); | |
+ | |
+ env->set_has_been_used(); | |
+ if (env->HasBeenRegistered()) continue; | |
+ | |
+ int translation_index = WriteTranslationFor(env, stackmaps); | |
+ // TODO(llvm): the pc offset can be obtained from the stackmap, | |
+ // but we do not support lazy deopt yet (for eager deopt it should be -1). | |
+ env->Register(bailout_id, translation_index, -1); | |
+ | |
+ data->SetAstId(bailout_id, env->ast_id()); | |
+ data->SetTranslationIndex(bailout_id, | |
+ Smi::FromInt(translation_index)); | |
+ data->SetArgumentsStackHeight(bailout_id, | |
+ Smi::FromInt(env->arguments_stack_height())); | |
+ data->SetPc(deopt_entry_number, Smi::FromInt(-1)); | |
+ } | |
+ | |
+ auto length_before = deopt_data_->deoptimization_literals().length(); | |
+ for (auto function : inlined_functions()) { | |
+ deopt_data_->DefineDeoptimizationLiteral(function); | |
+ } | |
+ auto inlined_function_count = deopt_data_->deoptimization_literals().length() | |
+ - length_before; | |
+ data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count)); | |
+ | |
+ auto literals_len = deopt_data_->deoptimization_literals().length(); | |
+ Handle<FixedArray> literals = isolate()->factory()->NewFixedArray( | |
+ literals_len, TENURED); | |
+ { | |
+ AllowDeferredHandleDereference copy_handles; | |
+ for (int i = 0; i < literals_len; i++) { | |
+ literals->set(i, *(deopt_data_->deoptimization_literals()[i])); | |
+ } | |
+ data->SetLiteralArray(*literals); | |
+ } | |
+ | |
+ Handle<ByteArray> translations = | |
+ deopt_data_->translations().CreateByteArray(isolate()->factory()); | |
+ data->SetTranslationByteArray(*translations); | |
+ | |
+ data->SetOptimizationId(Smi::FromInt(info()->optimization_id())); | |
+ if (info()->IsOptimizing()) { | |
+ // Reference to shared function info does not change between phases. | |
+ AllowDeferredHandleDereference allow_handle_dereference; | |
+ data->SetSharedFunctionInfo(*info()->shared_info()); | |
+ } else { | |
+ data->SetSharedFunctionInfo(Smi::FromInt(0)); | |
+ } | |
+ data->SetWeakCellCache(Smi::FromInt(0)); // I don't know what this is. | |
+ data->SetOsrAstId(Smi::FromInt(info()->osr_ast_id().ToInt())); | |
+ data->SetOsrPcOffset(Smi::FromInt(6)); | |
+ | |
+ code->set_deoptimization_data(*data); | |
+} | |
+ | |
+// TODO(llvm): refactor. DRY + move disass features to a separate file. | |
+// Also, we shall not need an instance of LLVMGranularity for such things. | |
+// Returns size of the instruction starting at pc or -1 if an error occurs. | |
+int LLVMGranularity::CallInstructionSizeAt(Address pc) { | |
+ auto triple = x64_target_triple; | |
+ std::string err; | |
+ const llvm::Target* target = llvm::TargetRegistry::lookupTarget(triple, | |
+ err); | |
+ DCHECK(target); | |
+ std::unique_ptr<llvm::MCRegisterInfo> mri(target->createMCRegInfo(triple)); | |
+ DCHECK(mri); | |
+ std::unique_ptr<llvm::MCAsmInfo> mai(target->createMCAsmInfo(*mri, triple)); | |
+ DCHECK(mai); | |
+ std::unique_ptr<llvm::MCInstrInfo> mii(target->createMCInstrInfo()); | |
+ DCHECK(mii); | |
+ std::string feature_str; | |
+ const llvm::StringRef cpu = ""; | |
+ std::unique_ptr<llvm::MCSubtargetInfo> sti( | |
+ target->createMCSubtargetInfo(triple, cpu, feature_str)); | |
+ DCHECK(sti); | |
+ llvm::MCContext mc_context(mai.get(), mri.get(), nullptr); | |
+ std::unique_ptr<llvm::MCDisassembler> disasm( | |
+ target->createMCDisassembler(*sti, mc_context)); | |
+ DCHECK(disasm); | |
+ | |
+ llvm::MCInst inst; | |
+ uint64_t size; | |
+ auto max_instruction_length = 15; // True for x64. | |
+ | |
+ llvm::MCDisassembler::DecodeStatus s = disasm->getInstruction( | |
+ inst /* out */, size /* out */, | |
+ llvm::ArrayRef<uint8_t>(pc, pc + max_instruction_length), | |
+ 0, llvm::nulls(), llvm::nulls()); | |
+ | |
+ std::unique_ptr<const llvm::MCInstrAnalysis> mia( | |
+ target->createMCInstrAnalysis(mii.get())); | |
+ DCHECK(mia); | |
+ | |
+ if (s == llvm::MCDisassembler::Success && mia->isCall(inst)) | |
+ return IntHelper::AsInt(size); | |
+ else | |
+ return -1; | |
+} | |
+ | |
+std::vector<RelocInfo> LLVMGranularity::Patch( | |
+ Address start, Address end, LLVMRelocationData::RelocMap& reloc_map) { | |
+ std::vector<RelocInfo> updated_reloc_infos; | |
+ | |
+ // TODO(llvm): | |
+ // This dumb duplication from Disass() looks like it has to be refactored. | |
+ // But this Patch() technique itself is not a production quality solution | |
+ // so it should be gone and is not worth refactoring. | |
+ auto triple = x64_target_triple; | |
+ std::string err; | |
+ const llvm::Target* target = llvm::TargetRegistry::lookupTarget(triple, | |
+ err); | |
+ DCHECK(target); | |
+ std::unique_ptr<llvm::MCRegisterInfo> mri(target->createMCRegInfo(triple)); | |
+ DCHECK(mri); | |
+ std::unique_ptr<llvm::MCAsmInfo> mai(target->createMCAsmInfo(*mri, triple)); | |
+ DCHECK(mai); | |
+ std::unique_ptr<llvm::MCInstrInfo> mii(target->createMCInstrInfo()); | |
+ DCHECK(mii); | |
+ std::string feature_str; | |
+ const llvm::StringRef cpu = ""; | |
+ std::unique_ptr<llvm::MCSubtargetInfo> sti( | |
+ target->createMCSubtargetInfo(triple, cpu, feature_str)); | |
+ DCHECK(sti); | |
+ auto intel_syntax = 1; | |
+ inst_printer_ = std::unique_ptr<llvm::MCInstPrinter>( | |
+ target->createMCInstPrinter(llvm::Triple(llvm::Triple::normalize(triple)), | |
+ intel_syntax, *mai, *mii, *mri)); | |
+ inst_printer_->setPrintImmHex(true); | |
+ DCHECK(inst_printer_); | |
+ llvm::MCContext mc_context(mai.get(), mri.get(), nullptr); | |
+ std::unique_ptr<llvm::MCDisassembler> disasm( | |
+ target->createMCDisassembler(*sti, mc_context)); | |
+ DCHECK(disasm); | |
+ | |
+ auto pos = start; | |
+ while (pos < end) { | |
+ llvm::MCInst inst; | |
+ uint64_t size; | |
+ auto address = 0; | |
+ | |
+ llvm::MCDisassembler::DecodeStatus s = disasm->getInstruction( | |
+ inst /* out */, size /* out */, llvm::ArrayRef<uint8_t>(pos, end), | |
+ address, llvm::nulls(), llvm::nulls()); | |
+ | |
+ if (s == llvm::MCDisassembler::Fail) break; | |
+ | |
+ // const llvm::MCInstrDesc& desc = mii_->get(inst.getOpcode()); | |
+ // and testing desc.isMoveImmediate() didn't work :( | |
+ | |
+ if (inst.getNumOperands() == 2 && inst.getOperand(1).isImm()) { | |
+ auto imm = static_cast<uint64_t>(inst.getOperand(1).getImm()); | |
+ if (!is_uint32(imm) && reloc_map.count(imm)) { | |
+ DCHECK(size == 10); // size of mov imm64 | |
+ auto pair = reloc_map[imm]; | |
+ RelocInfo rinfo = pair.first; | |
+ LLVMRelocationData::ExtendedInfo minfo = pair.second; | |
+ if (rinfo.rmode() == RelocInfo::CELL || | |
+ rinfo.rmode() == RelocInfo::EMBEDDED_OBJECT) { | |
+ intptr_t data = rinfo.data(); | |
+ // Our invariant, which is a hack. See RecordRelocInfo(). | |
+ DCHECK_EQ(static_cast<uint64_t>(data), imm); | |
+ if (minfo.cell_extended) { // immediate was extended from 32 bit to 64. | |
+ DCHECK((imm & 0xffffffff) == LLVMChunkBuilder::kExtFillingValue); | |
+ Memory::uintptr_at(pos + 2) = imm >> 32; | |
+ data >>= 32; | |
+ } | |
+ rinfo.set_pc(pos + 2); | |
+ rinfo.set_data(data); | |
+ updated_reloc_infos.push_back(rinfo); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } | |
+ } | |
+ pos += size; | |
+ } | |
+ return updated_reloc_infos; | |
+} | |
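The `pos + 2` above follows from the x86-64 encoding of `mov r64, imm64`: a REX.W prefix byte, a 0xB8+rd opcode byte, then the 8-byte immediate, 10 bytes total, so the immediate starts at offset 2. A sketch of the cell_extended rewrite (helper name illustrative):

#include <cstdint>
#include <cstring>

static void ShrinkExtendedImmediate(uint8_t* pos, uint64_t imm) {
  // The low 32 bits were kExtFillingValue filler; the real 32-bit payload
  // was shifted into the high half when the immediate was extended.
  uint64_t real_value = imm >> 32;
  std::memcpy(pos + 2, &real_value, sizeof(real_value));
}

int main() {
  uint8_t mov[10] = {0x48, 0xB8};  // REX.W + mov rax, imm64
  ShrinkExtendedImmediate(mov, 0xDEADBEEF00000000ULL);
  return 0;
}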
+ | |
+int LLVMChunk::GetParameterStackSlot(int index) const { | |
+ // The receiver is at index 0, the first parameter at index 1, so we | |
+ // shift all parameter indexes down by the number of parameters, and | |
+ // make sure they end up negative so they are distinguishable from | |
+ // spill slots. | |
+ int result = index - info()->num_parameters() - 1; | |
+ | |
+ DCHECK(result < 0); | |
+ return result; | |
+} | |
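Example (hypothetical function with two declared parameters, so num_parameters() == 2 and the receiver is at index 0):

#include <cassert>

int main() {
  int num_parameters = 2;
  auto slot = [&](int index) { return index - num_parameters - 1; };
  assert(slot(0) == -3);  // receiver
  assert(slot(1) == -2);  // first declared parameter
  assert(slot(2) == -1);  // last parameter; all results stay negative
}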
+LLVMChunk* LLVMChunk::NewChunk(HGraph *graph) { | |
+ DisallowHandleAllocation no_handles; | |
+ DisallowHeapAllocation no_gc; | |
+ graph->DisallowAddingNewValues(); | |
+ CompilationInfo* info = graph->info(); | |
+ | |
+ LLVMChunkBuilder builder(info, graph); | |
+ LLVMChunk* chunk = builder | |
+ .Build() | |
+ .NormalizePhis() | |
+ .GiveNamesToPointerValues() | |
+ .PlaceStatePoints() | |
+ .RewriteStatePoints() | |
+ .Optimize() | |
+ .Create(); | |
+ if (chunk == NULL) return NULL; | |
+ return chunk; | |
+} | |
+ | |
+int32_t LLVMRelocationData::GetNextUnaccountedPatchpointId() { | |
+ return ++last_patchpoint_id_; | |
+} | |
+ | |
+int32_t LLVMRelocationData::GetNextDeoptPatchpointId() { | |
+ int32_t next_id = ++last_patchpoint_id_; | |
+ DeoptIdMap map {next_id, -1}; | |
+ is_deopt_.Add(map, zone_); | |
+ return next_id; | |
+} | |
+ | |
+int32_t LLVMRelocationData::GetNextSafepointPatchpointId( | |
+ size_t num_passed_args) { | |
+ int32_t next_id = ++last_patchpoint_id_; | |
+ is_safepoint_.Add(next_id, zone_); | |
+ num_safepoint_function_args_[next_id] = num_passed_args; | |
+ return next_id; | |
+} | |
+ | |
+int32_t LLVMRelocationData::GetNextRelocPatchpointId(size_t num_passed_args, | |
+ bool is_safepoint) { | |
+ int32_t next_id; | |
+ if (is_safepoint) | |
+ next_id = GetNextSafepointPatchpointId(num_passed_args); | |
+ else | |
+ next_id = ++last_patchpoint_id_; | |
+ is_reloc_.Add(next_id, zone_); | |
+ return next_id; | |
+} | |
+ | |
+int32_t LLVMRelocationData::GetNextRelocNopPatchpointId( | |
+ size_t num_passed_args, bool is_safepoint) { | |
+ int32_t next_id = GetNextRelocPatchpointId(num_passed_args, is_safepoint); | |
+ is_reloc_with_nop_.Add(next_id, zone_); | |
+ return next_id; | |
+} | |
+ | |
+int32_t LLVMRelocationData::GetNextDeoptRelocPatchpointId() { | |
+ auto next_id = GetNextRelocPatchpointId(); | |
+ DeoptIdMap map {next_id, -1}; | |
+ is_deopt_.Add(map, zone_); | |
+ return next_id; | |
+} | |
+ | |
+size_t LLVMRelocationData::GetNumSafepointFuncionArgs(int32_t patchpoint_id) { | |
+ return num_safepoint_function_args_[patchpoint_id]; | |
+} | |
+ | |
+void LLVMRelocationData::SetDeoptReason(int32_t patchpoint_id, | |
+ Deoptimizer::DeoptReason reason) { | |
+ deopt_reasons_[patchpoint_id] = reason; | |
+} | |
+ | |
+Deoptimizer::DeoptReason LLVMRelocationData::GetDeoptReason( | |
+ int32_t patchpoint_id) { | |
+ DCHECK(deopt_reasons_.count(patchpoint_id)); | |
+ return deopt_reasons_[patchpoint_id]; | |
+} | |
+ | |
+void LLVMRelocationData::SetBailoutId(int32_t patchpoint_id, int bailout_id) { | |
+ CHECK(IsPatchpointIdDeopt(patchpoint_id)); | |
+ for (int i = 0; i < is_deopt_.length(); ++i) { | |
+ if (is_deopt_[i].patchpoint_id == patchpoint_id) { | |
+ is_deopt_[i].bailout_id = bailout_id; | |
+ return; | |
+ } | |
+ } | |
+ UNREACHABLE(); | |
+} | |
+ | |
+int LLVMRelocationData::GetBailoutId(int32_t patchpoint_id) { | |
+ CHECK(IsPatchpointIdDeopt(patchpoint_id)); | |
+ for (int i = 0; i < is_deopt_.length(); ++i) { | |
+ if (is_deopt_[i].patchpoint_id == patchpoint_id) { | |
+ CHECK(is_deopt_[i].bailout_id != -1); | |
+ return is_deopt_[i].bailout_id; | |
+ } | |
+ } | |
+ UNREACHABLE(); | |
+ return -1; | |
+} | |
+ | |
+bool LLVMRelocationData::IsPatchpointIdDeopt(int32_t patchpoint_id) { | |
+ for (int i = 0; i < is_deopt_.length(); ++i) { | |
+ if (is_deopt_[i].patchpoint_id == patchpoint_id) | |
+ return true; | |
+ } | |
+ return false; | |
+} | |
+ | |
+bool LLVMRelocationData::IsPatchpointIdSafepoint(int32_t patchpoint_id) { | |
+ return is_safepoint_.Contains(patchpoint_id); | |
+} | |
+ | |
+bool LLVMRelocationData::IsPatchpointIdReloc(int32_t patchpoint_id) { | |
+ return is_reloc_.Contains(patchpoint_id); | |
+} | |
+ | |
+bool LLVMRelocationData::IsPatchpointIdRelocNop(int32_t patchpoint_id) { | |
+ return is_reloc_with_nop_.Contains(patchpoint_id); | |
+} | |
+ | |
+ | |
+// TODO(llvm): I haven't yet decided if it's profitable to use the llvm statepoint | |
+// mechanism to place safepoint polls. This function should either be used | |
+// or removed. | |
+void LLVMChunkBuilder::CreateSafepointPollFunction() { | |
+ DCHECK(module_); | |
+ DCHECK(llvm_ir_builder_); | |
+ auto new_func = module_->getOrInsertFunction("gc.safepoint_poll", | |
+ __ getVoidTy(), nullptr); | |
+ safepoint_poll_ = llvm::cast<llvm::Function>(new_func); | |
+ __ SetInsertPoint(NewBlock("Safepoint poll entry", safepoint_poll_)); | |
+ __ CreateRetVoid(); | |
+} | |
+ | |
+LLVMChunkBuilder& LLVMChunkBuilder::Build() { | |
+ llvm::LLVMContext& llvm_context = LLVMGranularity::getInstance().context(); | |
+ chunk_ = new(zone()) LLVMChunk(info(), graph()); | |
+ module_ = LLVMGranularity::getInstance().CreateModule(); | |
+ module_->setTargetTriple(LLVMGranularity::x64_target_triple); | |
+ llvm_ir_builder_ = llvm::make_unique<llvm::IRBuilder<>>(llvm_context); | |
+ pointers_.clear(); | |
+ Types::Init(llvm_ir_builder_.get()); | |
+ status_ = BUILDING; | |
+ | |
+ // TODO(llvm): decide whether to have llvm insert safepoint polls. | |
+ // CreateSafepointPollFunction(); | |
+ | |
+ // First param is context (v8, js context) which goes to rsi, | |
+ // second param is the callee's JSFunction object (rdi), | |
+ // third param is rbx for detecting osr entry, | |
+ // fourth param is Parameter 0 which is `this`. | |
+ int num_parameters = info()->num_parameters() + 4; | |
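+  // E.g. for function f(a, b): num_parameters == 2 + 4 == 6, and the LLVM | |
+  // signature is (context, JSFunction, osr value, this, a, b) -> tagged. | |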
+ | |
+ std::vector<llvm::Type*> params(num_parameters, Types::tagged); | |
+ llvm::FunctionType* function_type = llvm::FunctionType::get( | |
+ Types::tagged, params, false); | |
+ function_ = llvm::cast<llvm::Function>( | |
+ module_->getOrInsertFunction(module_->getModuleIdentifier(), | |
+ function_type)); | |
+ | |
+ llvm::AttributeSet attr_set = function_->getAttributes(); | |
+  // Keep an rbp-based frame so the runtime can walk the stack as before. | |
+ attr_set = attr_set.addAttribute(llvm_context, | |
+ llvm::AttributeSet::FunctionIndex, | |
+ "no-frame-pointer-elim", "true"); | |
+ // Emit jumptables to .text instead of .rodata so relocation is easy. | |
+ attr_set = attr_set.addAttribute(llvm_context, | |
+ llvm::AttributeSet::FunctionIndex, | |
+ "put-jumptable-in-fn-section", "true"); | |
+ // Same for constant pools. | |
+ attr_set = attr_set.addAttribute(llvm_context, | |
+ llvm::AttributeSet::FunctionIndex, | |
+ "put-constantpool-in-fn-section", "true"); | |
+ | |
+ function_->setAttributes(attr_set); | |
+ function_->setGC(kGcStrategyName); | |
+ function_->setCallingConv(llvm::CallingConv::X86_64_V8); | |
+ | |
+ const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); | |
+ for (int i = 0; i < blocks->length(); i++) { | |
+ HBasicBlock* next = NULL; | |
+ if (i < blocks->length() - 1) next = blocks->at(i + 1); | |
+ DoBasicBlock(blocks->at(i), next); | |
+ DCHECK(!is_aborted()); | |
+ } | |
+ | |
+ ResolvePhis(); | |
+ | |
+ DCHECK(module_); | |
+ chunk()->set_llvm_function_id(std::stoi(module_->getModuleIdentifier())); | |
+ chunk()->set_deopt_data(std::move(deopt_data_)); | |
+ chunk()->set_reloc_data(reloc_data_); | |
+ status_ = DONE; | |
+ CHECK(pending_pushed_args_.is_empty()); | |
+ return *this; | |
+} | |
+ | |
+LLVMChunk* LLVMChunkBuilder::Create() { | |
+ LLVMGranularity::getInstance().AddModule(std::move(module_)); | |
+ return chunk(); | |
+} | |
+ | |
+void LLVMChunkBuilder::ResolvePhis() { | |
+ // Process the blocks in reverse order. | |
+ const ZoneList<HBasicBlock*>* blocks = graph_->blocks(); | |
+ for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) { | |
+ HBasicBlock* block = blocks->at(block_id); | |
+ ResolvePhis(block); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::ResolvePhis(HBasicBlock* block) { | |
+ for (int i = 0; i < block->phis()->length(); ++i) { | |
+ HPhi* phi = block->phis()->at(i); | |
+ for (int j = 0; j < phi->OperandCount(); ++j) { | |
+ HValue* operand = phi->OperandAt(j); | |
+ auto llvm_phi = static_cast<llvm::PHINode*>(phi->llvm_value()); | |
+      // In Hydrogen there are phi nodes whose operands are defined in the | |
+      // same block as the phi itself (e.g. string-base64 -> base64ToString). | |
+      // Such operands are translated into gap moves in the phi's predecessor | |
+      // blocks, so take the incoming block from the predecessor list. | |
+      DCHECK(phi->OperandCount() == phi->block()->predecessors()->length()); | |
+      llvm::BasicBlock* operand_block = | |
+          phi->block()->predecessors()->at(j)->llvm_end_basic_block(); | |
+ // We need this, otherwise we will insert Use(operand) in the last block | |
+ __ SetInsertPoint(operand_block); | |
+ llvm_phi->addIncoming(Use(operand), operand_block); | |
+ | |
+ } | |
+ } | |
+} | |
+ | |
+ | |
+llvm::Type* LLVMChunkBuilder::GetLLVMType(Representation r) { | |
+ switch (r.kind()) { | |
+ case Representation::Kind::kInteger32: | |
+ return Types::i32; | |
+ case Representation::Kind::kTagged: | |
+ case Representation::Kind::kExternal: // For now. | |
+ return Types::tagged; | |
+ case Representation::Kind::kSmi: | |
+ return Types::smi; | |
+ case Representation::Kind::kDouble: | |
+ return Types::float64; | |
+ case Representation::Kind::kNone: | |
+ return nullptr; | |
+ default: | |
+ UNIMPLEMENTED(); | |
+ return nullptr; | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoDummyUse(HInstruction* instr) { | |
+ Representation r = instr->representation(); | |
+ llvm::Type* type = GetLLVMType(r); | |
+ llvm::Value* dummy_constant = nullptr; | |
+ if (r.IsInteger32()) { | |
+ dummy_constant = __ getInt32(0xdead); | |
+ } else { | |
+ dummy_constant = __ getInt64(0xdead); | |
+ } | |
+ auto casted_dummy_constant = dummy_constant; | |
+ if (type) | |
+ casted_dummy_constant = __ CreateBitOrPointerCast(dummy_constant, type); | |
+ for (int i = 1; i < instr->OperandCount(); ++i) { | |
+ if (instr->OperandAt(i)->IsControlInstruction()) continue; | |
+ Use(instr->OperandAt(i)); // Visit all operands and dummy-use them as well. | |
+ } | |
+ instr->set_llvm_value(casted_dummy_constant); | |
+} | |
+ | |
+void LLVMChunkBuilder::VisitInstruction(HInstruction* current) { | |
+ HInstruction* old_current = current_instruction_; | |
+ current_instruction_ = current; | |
+ | |
+ if (current->CanReplaceWithDummyUses()) { | |
+ DoDummyUse(current); | |
+ } else { | |
+ HBasicBlock* successor; | |
+ if (current->IsControlInstruction() && | |
+ HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) && | |
+ successor != NULL) { | |
+ __ CreateBr(Use(successor)); // Goto(successor) | |
+ } else { | |
+ current->CompileToLLVM(this); // the meat | |
+ } | |
+ } | |
+ | |
+ argument_count_ += current->argument_delta(); | |
+ DCHECK(argument_count_ >= 0); | |
+ | |
+ current_instruction_ = old_current; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CreateConstant(HConstant* instr, | |
+ HBasicBlock* block) { | |
+ Representation r = instr->representation(); | |
+ if (r.IsSmi()) { | |
+ // TODO(llvm): use/write a function for that | |
+ // FIXME(llvm): this block was not tested | |
+ int64_t int32_value = instr->Integer32Value(); | |
+ // FIXME(llvm): getInt64 takes uint64_t! And we want to pass signed int64. | |
+ return __ getInt64(int32_value << (kSmiShift)); | |
+ } else if (r.IsInteger32()) { | |
+ return __ getInt32(instr->Integer32Value()); | |
+ } else if (r.IsDouble()) { | |
+ return llvm::ConstantFP::get(Types::float64, | |
+ instr->DoubleValue()); | |
+ } else if (r.IsExternal()) { | |
+ // TODO(llvm): tagged type | |
+ // TODO(llvm): RelocInfo::EXTERNAL_REFERENCE | |
+ Address external_address = instr->ExternalReferenceValue().address(); | |
+ auto as_i64 = __ getInt64(reinterpret_cast<uint64_t>(external_address)); | |
+ return __ CreateBitOrPointerCast(as_i64, Types::tagged); | |
+ } else if (r.IsTagged()) { | |
+ AllowHandleAllocation allow_handle_allocation; | |
+ AllowHeapAllocation allow_heap_allocation; | |
+ Handle<Object> object = instr->handle(isolate()); | |
+ auto current_block = __ GetInsertBlock(); | |
+ if (block) { | |
+ // TODO(llvm): use binary search or a search tree. | |
+      // If the constant is already defined in this block, return it. | |
+ for (int i = 0; i < block->defined_consts()->length(); ++i) { | |
+ HValue* constant = block->defined_consts()->at(i); | |
+ if (constant->id() == instr->id()) { | |
+ DCHECK(constant->llvm_value()); | |
+ return constant->llvm_value(); | |
+ } | |
+ } | |
+ // Record constant in the current block | |
+ block->RecordConst(instr); | |
+ auto const_block = block->llvm_start_basic_block(); | |
+      // Define object constants in the block's first LLVM basic block | |
+      // to avoid dominance problems. | |
+ __ SetInsertPoint(const_block); | |
+ } | |
+ auto result = MoveHeapObject(object); | |
+ instr->set_llvm_value(result); | |
+ __ SetInsertPoint(current_block); | |
+ return result; | |
+ } else { | |
+ UNREACHABLE(); | |
+ llvm::Value* fictive_value = nullptr; | |
+ return fictive_value; | |
+ } | |
+} | |
+ | |
+llvm::BasicBlock* LLVMChunkBuilder::NewBlock(const std::string& name, | |
+ llvm::Function* function) { | |
+  llvm::LLVMContext& llvm_context = LLVMGranularity::getInstance().context(); | |
+ if (!function) function = function_; | |
+ return llvm::BasicBlock::Create(llvm_context, name, function); | |
+} | |
+ | |
+llvm::BasicBlock* LLVMChunkBuilder::Use(HBasicBlock* block) { | |
+ if (!block->llvm_start_basic_block()) { | |
+ llvm::BasicBlock* llvm_block = NewBlock( | |
+ std::string("BlockEntry") + std::to_string(block->block_id())); | |
+ block->set_llvm_start_basic_block(llvm_block); | |
+ } | |
+ DCHECK(block->llvm_start_basic_block()); | |
+ return block->llvm_start_basic_block(); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::Use(HValue* value) { | |
+ if (value->EmitAtUses()) { | |
+ HInstruction* instr = HInstruction::cast(value); | |
+ VisitInstruction(instr); | |
+ } | |
+ DCHECK(value->llvm_value()); | |
+ DCHECK_EQ(value->llvm_value()->getType(), | |
+ GetLLVMType(value->representation())); | |
+ if (HasTaggedValue(value)) | |
+ pointers_.insert(value->llvm_value()); | |
+ return value->llvm_value(); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::SmiToInteger32(HValue* value) { | |
+ llvm::Value* res = nullptr; | |
+ if (SmiValuesAre32Bits()) { | |
+ // The smi can have tagged representation. | |
+ auto as_smi = __ CreateBitOrPointerCast(Use(value), Types::smi); | |
+ res = __ CreateLShr(as_smi, kSmiShift); | |
+ res = __ CreateTrunc(res, Types::i32); | |
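+    // E.g. with 32-bit smis the tagged value 7 is 7 << 32; shifting right | |
+    // by kSmiShift == 32 and truncating recovers the untagged i32 7. | |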
+ } else { | |
+ DCHECK(SmiValuesAre31Bits()); | |
+ UNIMPLEMENTED(); | |
+ // TODO(llvm): just implement sarl(dst, Immediate(kSmiShift)); | |
+ } | |
+ return res; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::SmiCheck(llvm::Value* value, bool negate) { | |
+ llvm::Value* value_as_smi = __ CreateBitOrPointerCast(value, Types::smi); | |
+ llvm::Value* res = __ CreateAnd(value_as_smi, __ getInt64(1)); | |
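+  // A smi has tag bit 0 == 0 while a heap object pointer has bit 0 == 1 | |
+  // (kHeapObjectTag), so (value & 1) == 0 iff value is a smi. | |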
+ return __ CreateICmp(negate ? llvm::CmpInst::ICMP_NE : llvm::CmpInst::ICMP_EQ, | |
+ res, __ getInt64(0)); | |
+} | |
+ | |
+void LLVMChunkBuilder::InsertDebugTrap() { | |
+ llvm::Function* debug_trap = llvm::Intrinsic::getDeclaration( | |
+ module_.get(), llvm::Intrinsic::debugtrap); | |
+ __ CreateCall(debug_trap); | |
+} | |
+ | |
+void LLVMChunkBuilder::Assert(llvm::Value* condition, | |
+ llvm::BasicBlock* next_block) { | |
+ if (!next_block) next_block = NewBlock("After assertion"); | |
+ auto fail = NewBlock("Fail assertion"); | |
+ __ CreateCondBr(condition, next_block, fail); | |
+ __ SetInsertPoint(fail); | |
+ InsertDebugTrap(); | |
+ __ CreateUnreachable(); | |
+ __ SetInsertPoint(next_block); | |
+} | |
+ | |
+void LLVMChunkBuilder::IncrementCounter(StatsCounter* counter, int value) { | |
+ DCHECK(value != 0); | |
+ if (!FLAG_native_code_counters || !counter->Enabled()) return; | |
+  Address counter_addr = ExternalReference(counter).address(); | |
+  auto llvm_counter_addr = | |
+      __ getInt64(reinterpret_cast<uint64_t>(counter_addr)); | |
+  auto casted_address = __ CreateIntToPtr(llvm_counter_addr, Types::ptr_i32); | |
+  auto llvm_counter = __ CreateLoad(casted_address); | |
+  auto llvm_value = __ getInt32(value); | |
+  auto updated_value = __ CreateAdd(llvm_counter, llvm_value); | |
+ __ CreateStore(updated_value, casted_address); | |
+} | |
+ | |
+void LLVMChunkBuilder::AssertSmi(llvm::Value* value, bool assert_not_smi) { | |
+ if (!emit_debug_code()) return; | |
+ | |
+ auto check = SmiCheck(value, assert_not_smi); | |
+ Assert(check); | |
+} | |
+ | |
+void LLVMChunkBuilder::AssertNotSmi(llvm::Value* value) { | |
+ bool assert_not_smi = true; | |
+ return AssertSmi(value, assert_not_smi); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::Integer32ToSmi(HValue* value) { | |
+ llvm::Value* int32_val = Use(value); | |
+ return Integer32ToSmi(int32_val); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::Integer32ToSmi(llvm::Value* value) { | |
+ llvm::Value* extended_width_val = __ CreateZExt(value, Types::smi); | |
+ return __ CreateShl(extended_width_val, kSmiShift); | |
+} | |
+ | |
+// See llvm/lib/Target/X86/X86CallingConv.td | |
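+// E.g. under X86_64_V8 three arguments travel in registers, so a call with | |
+// five arguments passes the remaining two on the stack. | |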
+static size_t number_stack_params(size_t overall_params_num, | |
+ llvm::CallingConv::ID calling_conv) { | |
+ size_t passed_in_registers = 0; | |
+ switch (calling_conv) { | |
+ case llvm::CallingConv::X86_64_V8: { | |
+ passed_in_registers = 3; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_E: { | |
+ passed_in_registers = 4; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_CES: { | |
+ passed_in_registers = 3; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_RWS: { | |
+ passed_in_registers = 3; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S1: { | |
+ passed_in_registers = 5; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S2: { | |
+ UNIMPLEMENTED(); | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S3: { | |
+ passed_in_registers = 4; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S4: { | |
+ passed_in_registers = 4; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S5: { | |
+ passed_in_registers = 3; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S6: { | |
+ passed_in_registers = 4; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S7: { | |
+ passed_in_registers = 4; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S8: { | |
+ passed_in_registers = 2; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S9: { | |
+ passed_in_registers = 5; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S10: { | |
+ passed_in_registers = 3; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S11: { | |
+ passed_in_registers = 2; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S12: { | |
+ passed_in_registers = 2; | |
+ break; | |
+ } | |
+ case llvm::CallingConv::X86_64_V8_S13: { | |
+ passed_in_registers = 2; | |
+ break; | |
+ } | |
+ default: { | |
+ UNREACHABLE(); | |
+ } | |
+ } | |
+ return overall_params_num >= passed_in_registers ? | |
+ overall_params_num - passed_in_registers : 0; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CallVal(llvm::Value* callable_value, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& params, | |
+ llvm::Type* return_type, | |
+ bool record_safepoint) { | |
+ bool is_var_arg = false; | |
+ | |
+ if (!return_type) | |
+ return_type = __ getVoidTy(); | |
+ | |
+ std::vector<llvm::Type*> param_types; | |
+ for (auto param : params) | |
+ param_types.push_back(param->getType()); | |
+ llvm::FunctionType* function_type = llvm::FunctionType::get( | |
+ return_type, param_types, is_var_arg); | |
+ llvm::PointerType* ptr_to_function = function_type->getPointerTo(); | |
+ auto casted = __ CreateBitOrPointerCast(callable_value, ptr_to_function); | |
+ | |
+ llvm::CallInst* call_inst = __ CreateCall(casted, params); | |
+ call_inst->setCallingConv(calling_conv); | |
+ | |
+ if (record_safepoint) { | |
+ auto stack_params = number_stack_params(params.size(), calling_conv); | |
+ int32_t stackmap_id = | |
+ reloc_data_->GetNextSafepointPatchpointId(stack_params); | |
+ call_inst->addAttribute(llvm::AttributeSet::FunctionIndex, | |
+ "statepoint-id", std::to_string(stackmap_id)); | |
+ } else { | |
+ call_inst->addAttribute(llvm::AttributeSet::FunctionIndex, | |
+ "no-statepoint-please", "true"); | |
+ } | |
+ | |
+ return call_inst; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CallCode(Handle<Code> code, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& params, | |
+ bool record_safepoint) { | |
+ auto index = chunk()->masm().GetCodeTargetIndex(code); | |
+ int nop_size; | |
+ int32_t pp_id; | |
+ auto stack_params = number_stack_params(params.size(), calling_conv); | |
+ if (code->kind() == Code::BINARY_OP_IC || | |
+ code->kind() == Code::COMPARE_IC) { | |
+ pp_id = reloc_data_->GetNextRelocNopPatchpointId(stack_params, | |
+ record_safepoint); | |
+    nop_size = 6; // call relative i32 takes 5 bytes (`e8` + i32) plus a nop | |
+ } else { | |
+ pp_id = reloc_data_->GetNextRelocPatchpointId(stack_params, | |
+ record_safepoint); | |
+ nop_size = 5; // call relative i32 takes 5 bytes: `e8` + i32 | |
+ } | |
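+  // A relative call is encoded as e8 xx xx xx xx (5 bytes); for the IC kinds | |
+  // above an extra nop byte is reserved (hence GetNextRelocNopPatchpointId). | |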
+ | |
+  // If we didn't also need a safepoint at the call site, a simple call to | |
+  // the patchpoint intrinsic would suffice. However, LLVM does not support | |
+  // statepoints on top of patchpoints (or any other intrinsics, for that | |
+  // matter). Luckily, the patchpoint's functionality is a subset of the | |
+  // statepoint intrinsic's. | |
+  auto llvm_null = llvm::ConstantPointerNull::get(Types::ptr_i8); | |
+  auto result = CallStatePoint(pp_id, llvm_null, calling_conv, params, | |
+                               nop_size); | |
+ | |
+ // Map pp_id -> index in code_targets_. | |
+ chunk()->target_index_for_ppid()[pp_id] = index; | |
+ | |
+ if (code->kind() == Code::BINARY_OP_IC || | |
+ code->kind() == Code::COMPARE_IC) { | |
+ // This will be optimized out anyway | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::donothing); | |
+ __ CreateCall(intrinsic); | |
+ } | |
+ | |
+ return result; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CallAddress(Address target, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& params, | |
+ llvm::Type* return_type) { | |
+ auto val_addr = __ getInt64(reinterpret_cast<uint64_t>(target)); | |
+ return CallVal(val_addr, calling_conv, params, return_type); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CallRuntimeViaId(Runtime::FunctionId id) { | |
+ return CallRuntime(Runtime::FunctionForId(id)); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CallRuntime(const Runtime::Function* function) { | |
+ auto arg_count = function->nargs; | |
+ | |
+ Address rt_target = ExternalReference(function, isolate()).address(); | |
+ // TODO(llvm): we shouldn't always save FP regs | |
+ // moreover, we should find a way to offload such decisions to LLVM. | |
+ // TODO(llvm): With a proper calling convention implemented in LLVM | |
+ // we could call the runtime functions directly. | |
+ // For now we call the CEntryStub which calls the function | |
+ // (just as CrankShaft does). | |
+ | |
+ // Don't save FP regs because llvm will [try to] take care of that | |
+ CEntryStub ces(isolate(), function->result_size, kDontSaveFPRegs); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ code = ces.GetCode(); | |
+ } | |
+ | |
+  // 1) Emit a relative 32-bit call to the index, which follows the calling | |
+  //    convention. | |
+  // 2) Record the reloc info once we know the pc offset (RelocInfo::CODE...). | |
+ | |
+ auto llvm_nargs = __ getInt64(arg_count); | |
+ auto target_temp = __ getInt64(reinterpret_cast<uint64_t>(rt_target)); | |
+ auto llvm_rt_target = target_temp; //__ CreateIntToPtr(target_temp, Types::ptr_i8); | |
+ auto context = GetContext(); | |
+ std::vector<llvm::Value*> args(arg_count + 3, nullptr); | |
+ args[0] = llvm_nargs; | |
+ args[1] = llvm_rt_target; | |
+ args[2] = context; | |
+ | |
+ for (int i = 0; i < pending_pushed_args_.length(); i++) { | |
+ args[arg_count + 3 - 1 - i] = pending_pushed_args_[i]; | |
+ } | |
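+  // E.g. with arg_count == 2 and pending_pushed_args_ == [x, y] this yields | |
+  // args == { nargs, target, context, y, x }: pushed args end up reversed. | |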
+ pending_pushed_args_.Clear(); | |
+ | |
+ return CallCode(code, llvm::CallingConv::X86_64_V8_CES, args); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CallRuntimeFromDeferred(Runtime::FunctionId id, | |
+ llvm::Value* context, std::vector<llvm::Value*> params) { | |
+ const Runtime::Function* function = Runtime::FunctionForId(id); | |
+ auto arg_count = function->nargs; | |
+ | |
+ Address rt_target = ExternalReference(function, isolate()).address(); | |
+ // TODO(llvm): we shouldn't always save FP regs | |
+ // moreover, we should find a way to offload such decisions to LLVM. | |
+ // TODO(llvm): With a proper calling convention implemented in LLVM | |
+ // we could call the runtime functions directly. | |
+ // For now we call the CEntryStub which calls the function | |
+ // (just as CrankShaft does). | |
+ | |
+ // Don't save FP regs because llvm will [try to] take care of that | |
+ CEntryStub ces(isolate(), function->result_size, kDontSaveFPRegs); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = ces.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ | |
+ // bool is_var_arg = false; | |
+ auto llvm_nargs = __ getInt64(arg_count); | |
+ auto target_temp = __ getInt64(reinterpret_cast<uint64_t>(rt_target)); | |
+ auto llvm_rt_target = __ CreateIntToPtr(target_temp, Types::ptr_i8); | |
+  std::vector<llvm::Value*> actual_params; | |
+  actual_params.push_back(llvm_nargs); | |
+  actual_params.push_back(llvm_rt_target); | |
+  actual_params.push_back(context); | |
+  for (size_t i = 0; i < params.size(); ++i) | |
+    actual_params.push_back(params[i]); | |
+  llvm::Value* call_inst = CallCode(code, llvm::CallingConv::X86_64_V8_CES, | |
+                                    actual_params); | |
+ return call_inst; | |
+} | |
+ | |
+ | |
+llvm::Value* LLVMChunkBuilder::FieldOperand(llvm::Value* base, int offset) { | |
+ // The problem is (volatile_0 + imm) + offset == volatile_0 + (imm + offset), | |
+ // so... | |
+ auto offset_val = ConstFoldBarrier(__ getInt64(offset - kHeapObjectTag)); | |
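+  // E.g. a tagged pointer is the object's address + 1 (kHeapObjectTag), so | |
+  // FieldOperand(base, kMapOffset) computes base - 1 + kMapOffset. | |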
+ // I don't know why, but it works OK even if base was already an i8* | |
+ llvm::Value* base_casted = __ CreateIntToPtr(base, Types::ptr_i8); | |
+ return __ CreateGEP(base_casted, offset_val); | |
+} | |
+ | |
+// TODO(llvm): It should probably become 'load field operand as type' | |
+// with tagged as default. | |
+llvm::Value* LLVMChunkBuilder::LoadFieldOperand(llvm::Value* base, int offset, | |
+ const char* name) { | |
+ llvm::Value* address = FieldOperand(base, offset); | |
+ llvm::Value* casted_address = __ CreatePointerCast(address, | |
+ Types::ptr_tagged); | |
+ return __ CreateLoad(casted_address, name); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::ConstructAddress(llvm::Value* base, | |
+                                                int64_t offset) { | |
+ // The problem is (volatile_0 + imm) + offset == volatile_0 + (imm + offset), | |
+ // so... | |
+ llvm::Value* offset_val = ConstFoldBarrier(__ getInt64(offset)); | |
+ llvm::Value* base_casted = __ CreateBitOrPointerCast(base, Types::ptr_i8); | |
+ auto constructed_address = __ CreateGEP(base_casted, offset_val); | |
+ return __ CreateBitOrPointerCast(constructed_address, base->getType()); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::ValueFromSmi(Smi* smi) { | |
+ intptr_t intptr_value = reinterpret_cast<intptr_t>(smi); | |
+ llvm::Value* value = __ getInt64(intptr_value); | |
+ return value; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::MoveHeapObject(Handle<Object> object) { | |
+ if (object->IsSmi()) { | |
+ // TODO(llvm): use/write a function for that | |
+ Smi* smi = Smi::cast(*object); | |
+ llvm::Value* value = ValueFromSmi(smi); | |
+ return __ CreateBitOrPointerCast(value, Types::tagged); | |
+ } else { // Heap object | |
+ // MacroAssembler::MoveHeapObject | |
+ AllowHeapAllocation allow_allocation; | |
+ AllowHandleAllocation allow_handles; | |
+ DCHECK(object->IsHeapObject()); | |
+ if (isolate()->heap()->InNewSpace(*object)) { | |
+ Handle<Cell> new_cell = isolate()->factory()->NewCell(object); | |
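+      // New-space objects may move, so we embed a pointer to a Cell and | |
+      // load the object from it; the GC keeps the cell contents current. | |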
+ llvm::Value* value = Move(new_cell, RelocInfo::CELL); | |
+ llvm::BasicBlock* current_block = __ GetInsertBlock(); | |
+      auto last_instr = current_block->getTerminator(); | |
+      // If the block already has a terminator, we must insert before it. | |
+      if (!last_instr) { | |
+        llvm::Value* ptr = __ CreateBitOrPointerCast(value, Types::ptr_tagged); | |
+        return __ CreateLoad(ptr); | |
+      } | |
+      llvm::Value* ptr = new llvm::BitCastInst(value, Types::ptr_tagged, "", | |
+                                               last_instr); | |
+      return new llvm::LoadInst(ptr, "", last_instr); | |
+ } else { | |
+ return Move(object, RelocInfo::EMBEDDED_OBJECT); | |
+ } | |
+ } | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::Move(Handle<Object> object, | |
+ RelocInfo::Mode rmode) { | |
+ AllowDeferredHandleDereference using_raw_address; | |
+ DCHECK(!RelocInfo::IsNone(rmode)); | |
+ DCHECK(object->IsHeapObject()); | |
+ DCHECK(!isolate()->heap()->InNewSpace(*object)); | |
+ | |
+ uint64_t intptr_value = reinterpret_cast<uint64_t>(object.location()); | |
+ auto the_pointer = RecordRelocInfo(intptr_value, rmode); | |
+ pointers_.insert(the_pointer); | |
+ return the_pointer; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::Compare(llvm::Value* lhs, llvm::Value* rhs) { | |
+ llvm::Value* casted_lhs = __ CreateBitOrPointerCast(lhs, Types::ptr_i8); | |
+ llvm::Value* casted_rhs = __ CreateBitOrPointerCast(rhs, Types::ptr_i8); | |
+ return __ CreateICmpEQ(casted_lhs, casted_rhs); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::Compare(llvm::Value* lhs, Handle<Object> rhs) { | |
+ AllowDeferredHandleDereference smi_check; | |
+ if (rhs->IsSmi()) { | |
+ UNIMPLEMENTED(); | |
+ // Cmp(dst, Smi::cast(*rhs)); | |
+ return nullptr; | |
+ } else { | |
+ auto type = Types::tagged; | |
+ auto llvm_rhs = __ CreateBitOrPointerCast(MoveHeapObject(rhs), type); | |
+ auto casted_lhs = __ CreateBitOrPointerCast(lhs, type); | |
+ return __ CreateICmpEQ(casted_lhs, llvm_rhs); | |
+ } | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CompareMap(llvm::Value* object, | |
+ Handle<Map> map) { | |
+ return Compare(LoadFieldOperand(object, HeapObject::kMapOffset), map); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CheckPageFlag(llvm::Value* object, int mask) { | |
+ auto page_align_mask = __ getInt64(~Page::kPageAlignmentMask); | |
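+  // Masking off the page-alignment bits yields the MemoryChunk header; e.g. | |
+  // with 1 MB pages an object at 0x12345678 lies in the chunk starting at | |
+  // 0x12300000, whose flags word lives at kFlagsOffset. | |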
+ // TODO(llvm): do the types match? | |
+ auto object_as_i64 = __ CreateBitOrPointerCast(object, Types::i64); | |
+ auto masked_object = __ CreateAnd(object_as_i64, page_align_mask, | |
+ "CheckPageFlag1"); | |
+ auto flags_address = ConstructAddress(masked_object, | |
+ MemoryChunk::kFlagsOffset); | |
+ auto i32_ptr_flags_address = __ CreateBitOrPointerCast(flags_address, | |
+ Types::ptr_i32); | |
+ auto flags = __ CreateLoad(i32_ptr_flags_address); | |
+ auto llvm_mask = __ getInt32(mask); | |
+ auto and_result = __ CreateAnd(flags, llvm_mask); | |
+ return __ CreateICmpEQ(and_result, __ getInt32(0), "CheckPageFlag"); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::AllocateHeapNumberSlow(HValue* unused, | |
+                                                      llvm::Value* null_ptr) { | |
+  // Returns an i8*. | |
+ llvm::Value* allocated = CallRuntimeViaId(Runtime::kAllocateHeapNumber); | |
+ // RecordSafepointWithRegisters... | |
+ return allocated; | |
+} | |
+ | |
+void LLVMChunkBuilder::UpdateAllocationTopHelper(llvm::Value* result_end, | |
+ AllocationFlags flags) { | |
+ if (emit_debug_code()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ ExternalReference allocation_top = | |
+ AllocationUtils::GetAllocationTopReference(isolate(), flags); | |
+ // Update new top. | |
+ llvm::Value* top_address = __ getInt64(reinterpret_cast<uint64_t> | |
+ (allocation_top.address())); | |
+ llvm::Value* address = __ CreateIntToPtr(top_address, Types::ptr_i64); | |
+ __ CreateStore(result_end, address); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::LoadAllocationTopHelper(AllocationFlags flags) { | |
+ ExternalReference allocation_top = | |
+ AllocationUtils::GetAllocationTopReference(isolate(), flags); | |
+ // Just return if allocation top is already known. | |
+ if ((flags & RESULT_CONTAINS_TOP) != 0) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ // Safe code. | |
+ llvm::Value* top_address = __ getInt64(reinterpret_cast<uint64_t> | |
+ (allocation_top.address())); | |
+ llvm::Value* address = __ CreateIntToPtr(top_address, Types::ptr_i64); | |
+ llvm::Value* result = __ CreateLoad(address); | |
+ return result; | |
+} | |
+ | |
+ | |
+llvm::Value* LLVMChunkBuilder::AllocateHeapNumber(MutableMode mode) { | |
+  llvm::Value* (LLVMChunkBuilder::*fptr)(HValue*, llvm::Value*); | |
+  fptr = &LLVMChunkBuilder::AllocateHeapNumberSlow; | |
+  llvm::Value* result = Allocate(__ getInt32(HeapNumber::kSize), fptr, | |
+                                 TAG_OBJECT); | |
+ Heap::RootListIndex map_index = mode == MUTABLE | |
+ ? Heap::kMutableHeapNumberMapRootIndex | |
+ : Heap::kHeapNumberMapRootIndex; | |
+ // Set the map. | |
+ llvm::Value* root = LoadRoot(map_index); | |
+ llvm::Value* address = FieldOperand(result, HeapObject::kMapOffset); | |
+ llvm::Value* casted_address = __ CreatePointerCast(address, | |
+ Types::ptr_tagged); | |
+ __ CreateStore(root, casted_address); | |
+ return result; | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::GetContext() { | |
+ // First parameter is our context (rsi). | |
+ return function_->arg_begin(); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::GetNan() { | |
+ auto zero = llvm::ConstantFP::get(Types::float64, 0); | |
+ return __ CreateFDiv(zero, zero); | |
+} | |
+ | |
+LLVMEnvironment* LLVMChunkBuilder::AssignEnvironment() { | |
+ HEnvironment* hydrogen_env = current_block_->last_environment(); | |
+ int argument_index_accumulator = 0; | |
+ ZoneList<HValue*> objects_to_materialize(0, zone()); | |
+ return CreateEnvironment( | |
+ hydrogen_env, &argument_index_accumulator, &objects_to_materialize); | |
+} | |
+ | |
+void LLVMChunkBuilder::GetAllEnvironmentValues( | |
+ LLVMEnvironment* environment, std::vector<llvm::Value*>& mapped_values) { | |
+ if (!environment) return; | |
+ GetAllEnvironmentValues(environment->outer(), mapped_values); | |
+ for (auto val : *environment->values()) | |
+ mapped_values.push_back(val); | |
+} | |
+ | |
+void LLVMChunkBuilder::DeoptimizeIf(llvm::Value* compare, | |
+ Deoptimizer::DeoptReason deopt_reason, | |
+ bool negate, | |
+ llvm::BasicBlock* next_block) { | |
+ LLVMEnvironment* environment = AssignEnvironment(); | |
+ auto patchpoint_id = reloc_data_->GetNextDeoptRelocPatchpointId(); | |
+ deopt_data_->Add(environment, patchpoint_id); | |
+ int bailout_id = deopt_data_->DeoptCount() - 1; | |
+ reloc_data_->SetBailoutId(patchpoint_id, bailout_id); | |
+ reloc_data_->SetDeoptReason(patchpoint_id, deopt_reason); | |
+ | |
+ if (!next_block) next_block = NewBlock("BlockCont"); | |
+ llvm::BasicBlock* saved_insert_point = __ GetInsertBlock(); | |
+ | |
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) UNIMPLEMENTED(); | |
+ if (info()->ShouldTrapOnDeopt()) { | |
+    // Our trap-on-deopt does not allow proceeding to the actual deopt. | |
+    // That could be changed if we ever need it, but be prepared: the | |
+    // implementation would involve some careful BB management. | |
+ if (!negate) { // We assert !compare, so if negate, we assert !!compare. | |
+ auto one = true; | |
+ compare = __ CreateXor(__ getInt1(one), compare); | |
+ } | |
+ Assert(compare, next_block); | |
+ return; | |
+ } | |
+ | |
+ Deoptimizer::BailoutType bailout_type = info()->IsStub() | |
+ ? Deoptimizer::LAZY | |
+ : Deoptimizer::EAGER; | |
+ DCHECK_EQ(bailout_type, Deoptimizer::EAGER); // We don't support lazy yet. | |
+ | |
+ Address entry; | |
+ { | |
+ AllowHandleAllocation allow; | |
+ // TODO(llvm): what if we use patchpoint_id here? | |
+ entry = Deoptimizer::GetDeoptimizationEntry(isolate(), | |
+ bailout_id, bailout_type); | |
+ } | |
+ if (entry == NULL) { | |
+ Abort(kBailoutWasNotPrepared); | |
+ return; | |
+ } | |
+ | |
+ // TODO(llvm): create Deoptimizer::DeoptInfo & Deoptimizer::JumpTableEntry (?) | |
+ | |
+ llvm::BasicBlock* deopt_block = NewBlock("DeoptBlock"); | |
+ __ SetInsertPoint(deopt_block); | |
+ | |
+ std::vector<llvm::Value*> mapped_values; | |
+ GetAllEnvironmentValues(environment, mapped_values); | |
+ | |
+ // Store offset relative to the Code Range start. It always fits in 32 bits. | |
+ // Will be fixed up later (in Code::CopyFrom). | |
+ Address start = isolate()->code_range()->start(); | |
+ chunk()->deopt_target_offset_for_ppid()[patchpoint_id] = entry - start; | |
+ | |
+ std::vector<llvm::Value*> empty; | |
+ int nop_size = 5; // Call relative i32 takes 5 bytes: `e8` + i32 | |
+ auto llvm_null = llvm::ConstantPointerNull::get(Types::ptr_i8); | |
+ CallPatchPoint(patchpoint_id, llvm_null, empty, mapped_values, nop_size); | |
+ __ CreateUnreachable(); | |
+ | |
+ __ SetInsertPoint(saved_insert_point); | |
+ if (!negate) | |
+ __ CreateCondBr(compare, deopt_block, next_block); | |
+ else | |
+ __ CreateCondBr(compare, next_block, deopt_block); | |
+ __ SetInsertPoint(next_block); | |
+} | |
+ | |
+llvm::CmpInst::Predicate LLVMChunkBuilder::TokenToPredicate(Token::Value op, | |
+ bool is_unsigned, | |
+ bool is_double) { | |
+ llvm::CmpInst::Predicate pred = llvm::CmpInst::BAD_FCMP_PREDICATE; | |
+ switch (op) { | |
+ case Token::EQ: | |
+ case Token::EQ_STRICT: | |
+ if (is_double) | |
+ pred = llvm::CmpInst::FCMP_OEQ; | |
+ else | |
+ pred = llvm::CmpInst::ICMP_EQ; | |
+ break; | |
+ case Token::NE: | |
+ case Token::NE_STRICT: | |
+ if (is_double) | |
+ pred = llvm::CmpInst::FCMP_ONE; | |
+ else | |
+ pred = llvm::CmpInst::ICMP_NE; | |
+ break; | |
+ case Token::LT: | |
+ if (is_double) | |
+ pred = llvm::CmpInst::FCMP_OLT; | |
+ else | |
+ pred = is_unsigned ? llvm::CmpInst::ICMP_ULT : llvm::CmpInst::ICMP_SLT; | |
+ break; | |
+ case Token::GT: | |
+ if (is_double) | |
+ pred = llvm::CmpInst::FCMP_OGT; | |
+ else | |
+ pred = is_unsigned ? llvm::CmpInst::ICMP_UGT : llvm::CmpInst::ICMP_SGT; | |
+ break; | |
+ case Token::LTE: | |
+ if (is_double) | |
+ pred = llvm::CmpInst::FCMP_OLE; | |
+ else | |
+ pred = is_unsigned ? llvm::CmpInst::ICMP_ULE : llvm::CmpInst::ICMP_SLE; | |
+ break; | |
+ case Token::GTE: | |
+ if (is_double) | |
+ pred = llvm::CmpInst::FCMP_OGE; | |
+ else | |
+ pred = is_unsigned ? llvm::CmpInst::ICMP_UGE : llvm::CmpInst::ICMP_SGE; | |
+ break; | |
+ case Token::IN: | |
+ case Token::INSTANCEOF: | |
+ default: | |
+ UNREACHABLE(); | |
+ } | |
+ return pred; | |
+} | |
+ | |
+bool LLVMChunkBuilder::HasTaggedValue(HValue* value) { | |
+ return value != NULL && | |
+ value->representation().IsTagged() && !value->type().IsSmi(); | |
+} | |
+ | |
+class PassInfoPrinter { | |
+ public: | |
+ PassInfoPrinter(const char* name, llvm::Module* module) | |
+ : name_(name), | |
+ module_(module) { | |
+ USE(name_); | |
+ USE(module_); | |
+#ifdef DEBUG | |
+ if (!only_after) { | |
+ llvm::errs() << filler << "vvv Module BEFORE " << name_ <<" vvv" | |
+ << filler << "\n"; | |
+ llvm::errs() << *module_; | |
+ llvm::errs() << filler << "^^^ Module BEFORE " << name_ <<" ^^^" | |
+ << filler << "\n"; | |
+ only_after = true; | |
+ } | |
+#endif | |
+ } | |
+ ~PassInfoPrinter() { | |
+#ifdef DEBUG | |
+ llvm::errs() << filler << "vvv Module AFTER " << name_ <<" vvv" | |
+ << filler << "\n"; | |
+ llvm::errs() << *module_; | |
+ llvm::errs() << filler << "^^^ Module AFTER " << name_ <<" ^^^" | |
+ << filler << "\n"; | |
+#endif | |
+ } | |
+ private: | |
+ static bool only_after; | |
+ static const char* filler; | |
+ const char* name_; | |
+ llvm::Module* module_; | |
+}; | |
+ | |
+const char* PassInfoPrinter::filler = "===================="; | |
+bool PassInfoPrinter::only_after = false; | |
+ | |
+// PlaceStatePoints and RewriteStatePoints may move things around a bit | |
+// (by deleting and adding instructions) so we can't refer to anything | |
+// by llvm::Value*. | |
+// This function gives names (which are preserved) to the values we want | |
+// to track. | |
+// Warning: the same method may not work for all transformation passes | |
+// because names might not be preserved. | |
+LLVMChunkBuilder& LLVMChunkBuilder::GiveNamesToPointerValues() { | |
+ PassInfoPrinter printer("GiveNamesToPointerValues", module_.get()); | |
+ DCHECK_EQ(number_of_pointers_, -1); | |
+ number_of_pointers_ = 0; | |
+ for (auto value : pointers_) { | |
+ value->setName(kPointersPrefix + std::to_string(number_of_pointers_++)); | |
+ } | |
+ // Now we have names. llvm::Value*s will soon become invalid. | |
+ pointers_.clear(); | |
+ return *this; | |
+} | |
+ | |
+void LLVMChunkBuilder::DumpPointerValues() { | |
+ DCHECK_GE(number_of_pointers_, 0); | |
+#ifdef DEBUG | |
+ std::cerr << "< POINTERS:" << "\n"; | |
+  for (auto i = 0; i < number_of_pointers_; i++) { | |
+ std::string name = kPointersPrefix + std::to_string(i); | |
+ auto value = function_->getValueSymbolTable().lookup(name); | |
+ if (value) | |
+ llvm::errs() << value->getName() << " | " << *value << "\n"; | |
+ } | |
+ std::cerr << "POINTERS >" << "\n"; | |
+#endif | |
+} | |
+ | |
+LLVMChunkBuilder& LLVMChunkBuilder::NormalizePhis() { | |
+ PassInfoPrinter printer("normalization", module_.get()); | |
+ llvm::legacy::FunctionPassManager pass_manager(module_.get()); | |
+ if (FLAG_phi_normalize) pass_manager.add(createNormalizePhisPass()); | |
+ pass_manager.doInitialization(); | |
+ pass_manager.run(*function_); | |
+ return *this; | |
+} | |
+ | |
+LLVMChunkBuilder& LLVMChunkBuilder::PlaceStatePoints() { | |
+ PassInfoPrinter printer("PlaceStatePoints", module_.get()); | |
+ DumpPointerValues(); | |
+ llvm::legacy::FunctionPassManager pass_manager(module_.get()); | |
+ pass_manager.add(llvm::createPlaceSafepointsPass()); | |
+ pass_manager.doInitialization(); | |
+ pass_manager.run(*function_); | |
+ pass_manager.doFinalization(); | |
+ return *this; | |
+} | |
+ | |
+LLVMChunkBuilder& LLVMChunkBuilder::RewriteStatePoints() { | |
+ PassInfoPrinter printer("RewriteStatepointsForGC", module_.get()); | |
+ DumpPointerValues(); | |
+ | |
+ std::set<llvm::Value*> pointer_values; | |
+  for (auto i = 0; i < number_of_pointers_; i++) { | |
+ std::string name = kPointersPrefix + std::to_string(i); | |
+ auto value = function_->getValueSymbolTable().lookup(name); | |
+ if (value) | |
+ pointer_values.insert(value); | |
+ } | |
+ | |
+ llvm::legacy::PassManager pass_manager; | |
+ pass_manager.add(v8::internal::createRewriteStatepointsForGCPass( | |
+ pointer_values)); | |
+ pass_manager.run(*module_.get()); | |
+ return *this; | |
+} | |
+ | |
+ | |
+LLVMChunkBuilder& LLVMChunkBuilder::Optimize() { | |
+ DCHECK(module_); | |
+#ifdef DEBUG | |
+ llvm::verifyFunction(*function_, &llvm::errs()); | |
+#endif | |
+ PassInfoPrinter printer("optimization", module_.get()); | |
+ | |
+ LLVMGranularity::getInstance().OptimizeFunciton(module_.get(), function_); | |
+ LLVMGranularity::getInstance().OptimizeModule(module_.get()); | |
+ return *this; | |
+} | |
+ | |
+// FIXME(llvm): obsolete. | |
+void LLVMChunkBuilder::CreateVolatileZero() { | |
+ volatile_zero_address_ = __ CreateAlloca(Types::i64); | |
+ bool is_volatile = true; | |
+ __ CreateStore(__ getInt64(0), volatile_zero_address_, is_volatile); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::GetVolatileZero() { | |
+ bool is_volatile = true; | |
+ return __ CreateLoad(volatile_zero_address_, is_volatile, "volatile_zero"); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::ConstFoldBarrier(llvm::Value* imm) { | |
+// return __ CreateAdd(GetVolatileZero(), imm); | |
+ return imm; | |
+} | |
+ | |
+void LLVMChunkBuilder::PatchReceiverToGlobalProxy() { | |
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) { | |
+ auto receiver = GetParameter(0); // `this' | |
+ auto is_undefined = CompareRoot(receiver, Heap::kUndefinedValueRootIndex); | |
+ auto patch_receiver = NewBlock("Patch receiver"); | |
+ auto receiver_ok = NewBlock("Receiver OK"); | |
+ __ CreateCondBr(is_undefined, patch_receiver, receiver_ok); | |
+ __ SetInsertPoint(patch_receiver); | |
+ auto context_as_ptr_to_tagged = __ CreateBitOrPointerCast(GetContext(), | |
+ Types::ptr_tagged); | |
+ auto global_object_operand_address = ConstructAddress( | |
+ context_as_ptr_to_tagged, | |
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)); | |
+ auto global_object_operand = __ CreateLoad(global_object_operand_address); | |
+ auto global_receiver = LoadFieldOperand(global_object_operand, | |
+ GlobalObject::kGlobalProxyOffset); | |
+ __ CreateBr(receiver_ok); | |
+ __ SetInsertPoint(receiver_ok); | |
+ auto phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(receiver, receiver_ok); | |
+ phi->addIncoming(global_receiver, patch_receiver); | |
+ global_receiver_ = phi; | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoBasicBlock(HBasicBlock* block, | |
+ HBasicBlock* next_block) { | |
+#ifdef DEBUG | |
+ std::cerr << __FUNCTION__ << std::endl; | |
+#endif | |
+ DCHECK(is_building()); | |
+ __ SetInsertPoint(Use(block)); | |
+ current_block_ = block; | |
+ next_block_ = next_block; | |
+ if (block->IsStartBlock()) { | |
+ PatchReceiverToGlobalProxy(); | |
+ // Ensure every function has an associated Stack Map section. | |
+ std::vector<llvm::Value*> empty; | |
+ CallStackMap(reloc_data_->GetNextUnaccountedPatchpointId(), empty); | |
+ | |
+    // If the function contains an OSR entry, its first instruction must be | |
+    // the OSR branch. | |
+ if (graph_->has_osr()) { | |
+ osr_preserved_values_.Clear(); | |
+      // We need to shift the LLVM spill index by the UnoptimizedFrameSlots | |
+      // count in order to preserve Full-Codegen's local values. | |
+ for (int i = 0; i < graph_->osr()->UnoptimizedFrameSlots(); ++i) { | |
+ auto alloc = __ CreateAlloca(Types::tagged); | |
+ osr_preserved_values_.Add(alloc, info()->zone()); | |
+ } | |
+ HBasicBlock* osr_block = graph_->osr()->osr_entry(); | |
+ llvm::BasicBlock* not_osr_target = NewBlock("NO_OSR_CONTINUE"); | |
+ llvm::BasicBlock* osr_target = Use(osr_block); | |
+ llvm::Value* zero = __ getInt64(0); | |
+ llvm::Value* zero_as_tagged = __ CreateBitOrPointerCast(zero, | |
+ Types::tagged); | |
+ llvm::Function::arg_iterator it = function_->arg_begin(); | |
+ int i = 0; | |
+ while (++i < 3) ++it; | |
+ llvm::Value* osr_value = it; | |
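+      // The third argument (rbx) is nonzero iff we are entering via OSR, | |
+      // hence the comparison against zero below. | |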
+ // Branch to OSR block | |
+ llvm::Value* compare = __ CreateICmpEQ(osr_value, zero_as_tagged); | |
+ __ CreateCondBr(compare, not_osr_target, osr_target); | |
+ __ SetInsertPoint(not_osr_target); | |
+ } | |
+ // CreateVolatileZero(); | |
+ block->UpdateEnvironment(graph_->start_environment()); | |
+ argument_count_ = 0; | |
+ } else if (block->predecessors()->length() == 1) { | |
+ // We have a single predecessor => copy environment and outgoing | |
+ // argument count from the predecessor. | |
+ DCHECK(block->phis()->length() == 0); | |
+ HBasicBlock* pred = block->predecessors()->at(0); | |
+ HEnvironment* last_environment = pred->last_environment(); | |
+ DCHECK(last_environment != NULL); | |
+ // Only copy the environment, if it is later used again. | |
+ if (pred->end()->SecondSuccessor() == NULL) { | |
+ DCHECK(pred->end()->FirstSuccessor() == block); | |
+ } else { | |
+ if (pred->end()->FirstSuccessor()->block_id() > block->block_id() || | |
+ pred->end()->SecondSuccessor()->block_id() > block->block_id()) { | |
+ last_environment = last_environment->Copy(); | |
+ } | |
+ } | |
+ block->UpdateEnvironment(last_environment); | |
+ DCHECK(pred->argument_count() >= 0); | |
+ argument_count_ = pred->argument_count(); | |
+ } else { | |
+ // We are at a state join => process phis. | |
+ HBasicBlock* pred = block->predecessors()->at(0); | |
+ // No need to copy the environment, it cannot be used later. | |
+ HEnvironment* last_environment = pred->last_environment(); | |
+ for (int i = 0; i < block->phis()->length(); ++i) { | |
+ HPhi* phi = block->phis()->at(i); | |
+ DoPhi(phi); | |
+ if (phi->HasMergedIndex()) { | |
+ last_environment->SetValueAt(phi->merged_index(), phi); | |
+ } | |
+ } | |
+ for (int i = 0; i < block->deleted_phis()->length(); ++i) { | |
+ if (block->deleted_phis()->at(i) < last_environment->length()) { | |
+ last_environment->SetValueAt(block->deleted_phis()->at(i), | |
+ graph_->GetConstantUndefined()); | |
+ } | |
+ } | |
+ block->UpdateEnvironment(last_environment); | |
+ // Pick up the outgoing argument count of one of the predecessors. | |
+ argument_count_ = pred->argument_count(); | |
+ } | |
+ HInstruction* current = block->first(); | |
+ while (current != NULL && !is_aborted()) { | |
+ // Code for constants in registers is generated lazily. | |
+ if (!current->EmitAtUses()) { | |
+ VisitInstruction(current); | |
+ } | |
+ current = current->next(); | |
+ } | |
+ block->set_argument_count(argument_count_); | |
+ block->set_llvm_end_basic_block(__ GetInsertBlock()); | |
+ next_block_ = NULL; | |
+ current_block_ = NULL; | |
+} | |
+ | |
+LLVMEnvironment* LLVMChunkBuilder::CreateEnvironment( | |
+ HEnvironment* hydrogen_env, int* argument_index_accumulator, | |
+ ZoneList<HValue*>* objects_to_materialize) { | |
+ if (hydrogen_env == NULL) return NULL; | |
+ | |
+ LLVMEnvironment* outer = | |
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator, | |
+ objects_to_materialize); | |
+ BailoutId ast_id = hydrogen_env->ast_id(); | |
+ DCHECK(!ast_id.IsNone() || | |
+ hydrogen_env->frame_type() != JS_FUNCTION); | |
+ | |
+ int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION) | |
+ ? 0 | |
+ : hydrogen_env->specials_count(); | |
+ | |
+ int value_count = hydrogen_env->length() - omitted_count; | |
+ LLVMEnvironment* result = | |
+ new(zone()) LLVMEnvironment(hydrogen_env->closure(), | |
+ hydrogen_env->frame_type(), | |
+ ast_id, | |
+ hydrogen_env->parameter_count(), | |
+ argument_count_, | |
+ value_count, | |
+ outer, | |
+ hydrogen_env->entry(), | |
+ zone()); | |
+ int argument_index = *argument_index_accumulator; | |
+ | |
+ // Store the environment description into the environment | |
+ // (with holes for nested objects) | |
+ for (int i = 0; i < hydrogen_env->length(); ++i) { | |
+ if (hydrogen_env->is_special_index(i) && | |
+ hydrogen_env->frame_type() != JS_FUNCTION) { | |
+ continue; | |
+ } | |
+ llvm::Value* op; | |
+ HValue* value = hydrogen_env->values()->at(i); | |
+ CHECK(!value->IsPushArguments()); // Do not deopt outgoing arguments | |
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) { | |
+ op = LLVMEnvironment::materialization_marker(); | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ if (value->IsConstant()) { | |
+ HConstant* instr = HConstant::cast(value); | |
+ op = CreateConstant(instr, current_block_); | |
+ } else { | |
+ op = Use(value); | |
+ } | |
+ } | |
+ // Well, we can add a corresponding llvm value here. | |
+ // Though it seems redundant... | |
+ result->AddValue(op, | |
+ value->representation(), | |
+ value->CheckFlag(HInstruction::kUint32)); | |
+ } | |
+ | |
+ // Recursively store the nested objects into the environment | |
+ for (int i = 0; i < hydrogen_env->length(); ++i) { | |
+ if (hydrogen_env->is_special_index(i)) continue; | |
+ | |
+ HValue* value = hydrogen_env->values()->at(i); | |
+ if (value->IsArgumentsObject() || value->IsCapturedObject()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } | |
+ | |
+ if (hydrogen_env->frame_type() == JS_FUNCTION) { | |
+ *argument_index_accumulator = argument_index; | |
+ } | |
+ | |
+ return result; | |
+} | |
+ | |
+void LLVMChunkBuilder::DoPhi(HPhi* phi) { | |
+ Representation r = phi->RepresentationFromInputs(); | |
+ llvm::Type* phi_type = GetLLVMType(r); | |
+ llvm::PHINode* llvm_phi = __ CreatePHI(phi_type, phi->OperandCount()); | |
+ phi->set_llvm_value(llvm_phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoBlockEntry(HBlockEntry* instr) { | |
+ Use(instr->block()); | |
+ // TODO(llvm): LGap & parallel moves (OSR support) | |
+} | |
+ | |
+void LLVMChunkBuilder::DoContext(HContext* instr) { | |
+ if (instr->HasNoUses()) return; | |
+ if (info()->IsStub()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ instr->set_llvm_value(GetContext()); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::GetParameter(int index) { | |
+ DCHECK_GE(index, 0); | |
+ | |
+ // `this' might be patched. | |
+ if (index == 0 && global_receiver_) | |
+ return global_receiver_; | |
+ | |
+ int num_parameters = info()->num_parameters() + 4; | |
+ llvm::Function::arg_iterator it = function_->arg_begin(); | |
+ // First off, skip first 2 parameters: context (rsi) | |
+ // and callee's JSFunction object (rdi). | |
+ // Now, I couldn't find a way to tweak the calling convention through LLVM | |
+ // in a way that parameters are passed left-to-right on the stack. | |
+ // So for now they are passed right-to-left, as in cdecl. | |
+ // And therefore we do the magic here. | |
+ index = -index; | |
+ while (--index + num_parameters > 0) ++it; | |
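+  // E.g. with num_parameters == 6, Parameter 0 (`this') maps to the last | |
+  // LLVM argument (index 5), Parameter 1 to index 4, and so on. | |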
+ | |
+ return it; | |
+} | |
+ | |
+void LLVMChunkBuilder::DoParameter(HParameter* instr) { | |
+ instr->set_llvm_value(GetParameter(instr->index())); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { | |
+ // There are no real uses of the arguments object. | |
+ // arguments.length and element access are supported directly on | |
+ // stack arguments, and any real arguments object use causes a bailout. | |
+ // So this value is never used. | |
+ return; | |
+} | |
+ | |
+void LLVMChunkBuilder::DoGoto(HGoto* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoSimulate(HSimulate* instr) { | |
+  // The "Simulate" instructions are for keeping track of what the stack | |
+  // machine state would be, in case we need to bail out and start using | |
+  // unoptimized code. They don't generate any actual machine instructions. | |
+ | |
+ // Seems to be the right implementation (same as for Lithium) | |
+ instr->ReplayEnvironment(current_block_->last_environment()); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStackCheck(HStackCheck* instr) { | |
+#ifdef DEBUG | |
+ std::cerr << __FUNCTION__ << std::endl; | |
+#endif | |
+// LLVMContext& llvm_context = LLVMGranularity::getInstance().context(); | |
+// llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+// llvm::Intrinsic::read_register, { Types::i64 }); | |
+// auto metadata = | |
+// llvm::MDNode::get(llvm_context, llvm::MDString::get(llvm_context, "rsp")); | |
+// llvm::MetadataAsValue* val = llvm::MetadataAsValue::get( | |
+// llvm_context, metadata); | |
+// llvm::Value* rsp_value = __ CreateCall(intrinsic, val); | |
+// auto above_equal = CompareRoot(rsp_value, Heap::kStackLimitRootIndex, | |
+// llvm::CmpInst::ICMP_UGE); | |
+// Assert(above_equal); | |
+} | |
+ | |
+void LLVMChunkBuilder::CallStackMap(int stackmap_id, llvm::Value* value) { | |
+ auto vector = std::vector<llvm::Value*>(1, value); | |
+ CallStackMap(stackmap_id, vector); | |
+} | |
+ | |
+void LLVMChunkBuilder::CallStackMap(int stackmap_id, | |
+ std::vector<llvm::Value*>& values) { | |
+ llvm::Function* stackmap = llvm::Intrinsic::getDeclaration( | |
+ module_.get(), llvm::Intrinsic::experimental_stackmap); | |
+ std::vector<llvm::Value*> mapped_values; | |
+ mapped_values.push_back(__ getInt64(stackmap_id)); | |
+ int shadow_bytes = 0; | |
+ mapped_values.push_back(__ getInt32(shadow_bytes)); | |
+ mapped_values.insert(mapped_values.end(), values.begin(), values.end()); | |
+ __ CreateCall(stackmap, mapped_values); | |
+} | |
+ | |
+// Note: we don't set calling convention here. | |
+// We return the call instruction so the caller can do it. | |
+llvm::CallInst* LLVMChunkBuilder::CallPatchPoint( | |
+ int32_t stackmap_id, | |
+ llvm::Value* target_function, | |
+ std::vector<llvm::Value*>& function_args, | |
+ std::vector<llvm::Value*>& live_values, | |
+ int covering_nop_size) { | |
+ llvm::Function* patchpoint = llvm::Intrinsic::getDeclaration( | |
+ module_.get(), llvm::Intrinsic::experimental_patchpoint_i64); | |
+ | |
+ auto llvm_patchpoint_id = __ getInt64(stackmap_id); | |
+ auto nop_size = __ getInt32(covering_nop_size); | |
+ auto num_args = __ getInt32(IntHelper::AsUInt32(function_args.size())); | |
+ | |
+ std::vector<llvm::Value*> patchpoint_args = | |
+ { llvm_patchpoint_id, nop_size, target_function, num_args }; | |
+ | |
+ patchpoint_args.insert(patchpoint_args.end(), | |
+ function_args.begin(), function_args.end()); | |
+ patchpoint_args.insert(patchpoint_args.end(), | |
+ live_values.begin(), live_values.end()); | |
+ | |
+ return __ CreateCall(patchpoint, patchpoint_args); | |
+} | |
+ | |
+ | |
+// Returns the value of gc.result (call instruction would be irrelevant). | |
+llvm::Value* LLVMChunkBuilder::CallStatePoint( | |
+ int32_t stackmap_id, | |
+ llvm::Value* target_function, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& function_args, | |
+ int covering_nop_size) { | |
+ | |
+ auto return_type = Types::tagged; | |
+ | |
+ // The statepoint intrinsic is overloaded by the function pointer type. | |
+ std::vector<llvm::Type*> params; | |
+  for (size_t i = 0; i < function_args.size(); i++) | |
+ params.push_back(function_args[i]->getType()); | |
+ llvm::FunctionType* function_type = llvm::FunctionType::get( | |
+ return_type, params, false); | |
+ auto function_type_ptr = function_type->getPointerTo(); | |
+ llvm::Type* statepoint_arg_types[] = | |
+ { llvm::cast<llvm::PointerType>(function_type_ptr) }; | |
+ | |
+ auto casted_target = __ CreateBitOrPointerCast(target_function, | |
+ function_type_ptr); | |
+ | |
+ llvm::Function* statepoint = llvm::Intrinsic::getDeclaration( | |
+ module_.get(), llvm::Intrinsic::experimental_gc_statepoint, | |
+ statepoint_arg_types); | |
+ | |
+ auto llvm_patchpoint_id = __ getInt64(stackmap_id); | |
+ auto nop_size = __ getInt32(covering_nop_size); | |
+ auto num_args = __ getInt32(IntHelper::AsUInt32(function_args.size())); | |
+ auto flags = __ getInt32(0); | |
+ auto num_transition_args = __ getInt32(0); | |
+ auto num_deopt_args = __ getInt32(0); | |
+ | |
+ std::vector<llvm::Value*> statepoint_args = | |
+ { llvm_patchpoint_id, nop_size, casted_target, num_args, flags }; | |
+ | |
+ statepoint_args.insert(statepoint_args.end(), | |
+ function_args.begin(), function_args.end()); | |
+ | |
+ statepoint_args.insert(statepoint_args.end(), | |
+ { num_transition_args, num_deopt_args }); | |
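+  // Final layout: { id, nop size, target, #args, flags, <call args...>, | |
+  // #transition args (0), #deopt args (0) }. | |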
+ | |
+ auto token = __ CreateCall(statepoint, statepoint_args); | |
+ token->setCallingConv(calling_conv); | |
+ | |
+ llvm::Function* gc_result = llvm::Intrinsic::getDeclaration( | |
+ module_.get(), llvm::Intrinsic::experimental_gc_result, { return_type }); | |
+ | |
+ return __ CreateCall(gc_result, { token }); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::RecordRelocInfo(uint64_t intptr_value, | |
+ RelocInfo::Mode rmode) { | |
+ bool extended = false; | |
+ if (is_uint32(intptr_value)) { | |
+ intptr_value = (intptr_value << 32) | kExtFillingValue; | |
+ extended = true; | |
+ } | |
+ | |
+ // Here we use the intptr_value (data) only to identify the entry in the map | |
+ RelocInfo rinfo(rmode, intptr_value); | |
+ LLVMRelocationData::ExtendedInfo meta_info; | |
+ meta_info.cell_extended = extended; | |
+ reloc_data_->Add(rinfo, meta_info); | |
+ | |
+ bool is_var_arg = false; | |
+ auto return_type = Types::tagged; | |
+ auto param_types = { Types::i64 }; | |
+ auto func_type = llvm::FunctionType::get(return_type, param_types, | |
+ is_var_arg); | |
+ // AT&T syntax. | |
+ const char* asm_string = "movabsq $1, $0"; | |
+ // i = 64-bit integer (on x64), q = register (like r, but more regs allowed). | |
+ const char* constraints = "=q,i,~{dirflag},~{fpsr},~{flags}"; | |
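+  // This emits movabs reg, imm64 (10 bytes on x64); the 8-byte immediate | |
+  // holding intptr_value is what the relocation machinery later patches. | |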
+ bool has_side_effects = true; | |
+ llvm::InlineAsm* inline_asm = llvm::InlineAsm::get(func_type, | |
+ asm_string, | |
+ constraints, | |
+ has_side_effects); | |
+ llvm::BasicBlock* current_block = __ GetInsertBlock(); | |
+  auto last_instr = current_block->getTerminator(); | |
+  // If the block already has a terminator, we must insert before it. | |
+  if (!last_instr) | |
+    return __ CreateCall(inline_asm, __ getInt64(intptr_value)); | |
+  return llvm::CallInst::Create(inline_asm, __ getInt64(intptr_value), | |
+                                "reloc", last_instr); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoConstant(HConstant* instr) { | |
+ llvm::Value* const_value = CreateConstant(instr, current_block_); | |
+ instr->set_llvm_value(const_value); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoReturn(HReturn* instr) { | |
+ if (info()->IsStub()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ if (info()->saves_caller_doubles()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ // see NeedsEagerFrame() in lithium-codegen. For now here it's always true. | |
+ DCHECK(!info()->IsStub()); | |
+ // I don't know what the absence (= 0) of this field means | |
+ DCHECK(instr->parameter_count()); | |
+ if (instr->parameter_count()->IsConstant()) { | |
+ llvm::Value* ret_val = Use(instr->value()); | |
+ __ CreateRet(ret_val); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { | |
+ __ CreateUnreachable(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoAdd(HAdd* instr) { | |
+ if (instr->representation().IsSmiOrInteger32()) { | |
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* llvm_left = Use(left); | |
+ llvm::Value* llvm_right = Use(right); | |
+ if (!can_overflow) { | |
+ llvm::Value* Add = __ CreateAdd(llvm_left, llvm_right, ""); | |
+ instr->set_llvm_value(Add); | |
+ } else { | |
+ auto type = instr->representation().IsSmi() ? Types::i64 : Types::i32; | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::sadd_with_overflow, type); | |
+ | |
+ llvm::Value* params[] = { llvm_left, llvm_right }; | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ | |
+ llvm::Value* sum = __ CreateExtractValue(call, 0); | |
+ llvm::Value* overflow = __ CreateExtractValue(call, 1); | |
+ instr->set_llvm_value(sum); | |
+ DeoptimizeIf(overflow, Deoptimizer::kOverflow); | |
+ } | |
+ } else if (instr->representation().IsDouble()) { | |
+ DCHECK(instr->left()->representation().IsDouble()); | |
+ DCHECK(instr->right()->representation().IsDouble()); | |
+ HValue* left = instr->BetterLeftOperand(); | |
+ HValue* right = instr->BetterRightOperand(); | |
+ llvm::Value* fadd = __ CreateFAdd(Use(left), Use(right)); | |
+ instr->set_llvm_value(fadd); | |
+ } else if (instr->representation().IsExternal()) { | |
+ // TODO(llvm): not tested (string-validate-input.js in doTest). | |
+ DCHECK(instr->IsConsistentExternalRepresentation()); | |
+ DCHECK(!instr->CheckFlag(HValue::kCanOverflow)); | |
+ // FIXME(llvm): possibly wrong. | |
+ llvm::Value* left_as_i64 = __ CreateBitOrPointerCast(Use(instr->left()), | |
+ Types::i64); | |
+ llvm::Value* right_as_i64 = __ CreateBitOrPointerCast(Use(instr->right()), | |
+ Types::i64); | |
+ llvm::Value* sum = __ CreateAdd(left_as_i64, right_as_i64); | |
+ llvm::Value* sum_as_external = __ CreateBitOrPointerCast( | |
+ sum, GetLLVMType(Representation::External())); | |
+ instr->set_llvm_value(sum_as_external); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
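+ | |
+// The overflow path above uses the standard LLVM idiom; in IR it is roughly: | |
+//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %l, i32 %r) | |
+//   %sum = extractvalue { i32, i1 } %s, 0 | |
+//   %ovf = extractvalue { i32, i1 } %s, 1 | |
+// which the x64 backend can fold into an add followed by a jo. | |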
+ | |
+void LLVMChunkBuilder::DoAllocateBlockContext(HAllocateBlockContext* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+ | |
+llvm::Value* LLVMChunkBuilder::Allocate(llvm::Value* object_size, | |
+ llvm::Value* (LLVMChunkBuilder::*fptr) | |
+ (HValue* instr, | |
+ llvm::Value* temp), | |
+ AllocationFlags flags, | |
+ HValue* instr, | |
+ llvm::Value* temp) { | |
+ DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); | |
+// DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); | |
+ if (!FLAG_inline_new) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ // Load address of new object into result. | |
+ llvm::Value* result = LoadAllocationTopHelper(flags); | |
+ | |
+ if ((flags & DOUBLE_ALIGNMENT) != 0) { | |
+ if (kPointerSize == kDoubleSize) { | |
+ if (FLAG_debug_code) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } | |
+ | |
+ // Calculate new top and bail out if new space is exhausted. | |
+ ExternalReference allocation_limit = | |
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags); | |
+ llvm::BasicBlock* not_carry = NewBlock("Allocate add is correct"); | |
+ llvm::BasicBlock* merge = NewBlock("Allocate merge"); | |
+ llvm::BasicBlock* deferred = NewBlock("Allocate deferred"); | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::uadd_with_overflow, Types::i64); | |
+ llvm::Value* size = __ CreateIntCast(object_size, Types::i64, true); | |
+ llvm::Value* params[] = { result, size }; | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ llvm::Value* sum = __ CreateExtractValue(call, 0); | |
+ llvm::Value* overflow = __ CreateExtractValue(call, 1); | |
+ __ CreateCondBr(overflow, deferred, not_carry); | |
+ | |
+ __ SetInsertPoint(not_carry); | |
+ llvm::Value* top_address = __ getInt64(reinterpret_cast<uint64_t> | |
+ (allocation_limit.address())); | |
+ llvm::Value* address = __ CreateIntToPtr(top_address, Types::ptr_i64); | |
+ llvm::Value* limit_operand = __ CreateLoad(address); | |
+ llvm::BasicBlock* limit_is_valid = NewBlock("Allocate limit is valid"); | |
+ llvm::Value* cmp_limit = __ CreateICmpUGT(sum, limit_operand); | |
+ __ CreateCondBr(cmp_limit, deferred, limit_is_valid); | |
+ | |
+ __ SetInsertPoint(limit_is_valid); | |
+ // Update allocation top. | |
+ UpdateAllocationTopHelper(sum, flags); | |
+ bool tag_result = (flags & TAG_OBJECT) != 0; | |
+ llvm::Value* final_result = nullptr; | |
+ if (tag_result) { | |
+ llvm::Value* inc = __ CreateAdd(result, __ getInt64(1)); | |
+ final_result = __ CreateIntToPtr(inc, Types::tagged); | |
+ __ CreateBr(merge); | |
+ } else { | |
+ final_result = __ CreateIntToPtr(result, Types::tagged); | |
+ __ CreateBr(merge); | |
+ } | |
+ | |
+ __ SetInsertPoint(deferred); | |
+ llvm::Value* deferred_result = (this->*fptr)(instr, temp); | |
+ __ CreateBr(merge); | |
+ | |
+ __ SetInsertPoint(merge); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(final_result, limit_is_valid); | |
+ phi->addIncoming(deferred_result, deferred); | |
+ return phi; | |
+} | |
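+ | |
+// The fast path above is a bump-pointer allocation; in pseudo-C: | |
+//   top = *allocation_top;                    // LoadAllocationTopHelper | |
+//   new_top = top + size;                     // uadd.with.overflow | |
+//   if (overflow || new_top > *allocation_limit) goto deferred;  // runtime | |
+//   *allocation_top = new_top;                // UpdateAllocationTopHelper | |
+//   return tag_result ? top + kHeapObjectTag : top;  // kHeapObjectTag == 1 | |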
+ | |
+ | |
+llvm::Value* LLVMChunkBuilder::AllocateSlow(HValue* obj, llvm::Value* temp) { | |
+ HAllocate* instr = HAllocate::cast(obj); | |
+ std::vector<llvm::Value*> args; | |
+ llvm::Value* arg1 = Integer32ToSmi(instr->size()); | |
+ int flags = 0; | |
+ if (instr->IsOldSpaceAllocation()) { | |
+ DCHECK(!instr->IsNewSpaceAllocation()); | |
+ flags = AllocateTargetSpace::update(flags, OLD_SPACE); | |
+ } else { | |
+ flags = AllocateTargetSpace::update(flags, NEW_SPACE); | |
+ } | |
+ | |
+ llvm::Value* value = __ getInt32(flags); | |
+ llvm::Value* arg2 = Integer32ToSmi(value); | |
+ args.push_back(arg2); | |
+ args.push_back(arg1); | |
+ llvm::Value* alloc = CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, | |
+ Use(instr->context()), args); | |
+ return alloc; | |
+} | |
+ | |
+ | |
+void LLVMChunkBuilder::DoAllocate(HAllocate* instr) { | |
+ // Allocate memory for the object. | |
+ AllocationFlags flags = TAG_OBJECT; | |
+ if (instr->MustAllocateDoubleAligned()) { | |
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | |
+ } | |
+ if (instr->IsOldSpaceAllocation()) { | |
+ DCHECK(!instr->IsNewSpaceAllocation()); | |
+ flags = static_cast<AllocationFlags>(flags | PRETENURE); | |
+ } | |
+ if (instr->MustPrefillWithFiller()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ DCHECK(instr->size()->representation().IsInteger32()); | |
+ llvm::Value* size = Use(instr->size()); | |
+ llvm::Value* (LLVMChunkBuilder::*fptr)(HValue*, llvm::Value*); | |
+ fptr = &LLVMChunkBuilder::AllocateSlow; | |
+ llvm::Value* res = Allocate(size, fptr, flags, instr); | |
+ instr->set_llvm_value(res); | |
+} | |
+ | |
+ | |
+void LLVMChunkBuilder::DoApplyArguments(HApplyArguments* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoBitwise(HBitwise* instr) { | |
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ llvm::Value* left = Use(instr->left()); | |
+ llvm::Value* right = Use(instr->right()); | |
+ if (instr->representation().IsSmiOrInteger32()) { | |
+ switch (instr->op()) { | |
+ case Token::BIT_AND: { | |
+ instr->set_llvm_value(__ CreateAnd(left, right)); | |
+ break; | |
+ } | |
+ case Token::BIT_OR: { | |
+ instr->set_llvm_value(__ CreateOr(left, right)); | |
+ break; | |
+ } | |
+ case Token::BIT_XOR: { | |
+ instr->set_llvm_value(__ CreateXor(left, right)); | |
+ break; | |
+ } | |
+ default: | |
+ UNREACHABLE(); | |
+ break; | |
+ } | |
+ } else { | |
+ // TODO(llvm): refactor (DRY) -- same code in DoSub. | |
+ AllowHandleAllocation allow_handles; | |
+ Handle<Code> code = | |
+ CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code(); | |
+ std::vector<llvm::Value*> params { Use(instr->context()), left, right }; | |
+ auto call_ic = CallCode(code, llvm::CallingConv::X86_64_V8_S10, params); | |
+ instr->set_llvm_value(call_ic); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { | |
+ DCHECK(instr->HasNoUses()); // if it fails, see what llvm_value is appropriate | |
+ Representation representation = instr->length()->representation(); | |
+ DCHECK(representation.Equals(instr->index()->representation())); | |
+ DCHECK(representation.IsSmiOrInteger32()); | |
+ USE(representation); | |
+ | |
+ if (instr->length()->IsConstant() && instr->index()->IsConstant()) { | |
+ auto length = instr->length()->GetInteger32Constant(); | |
+ auto index = instr->index()->GetInteger32Constant(); | |
+ // Avoid stackmap creation (happens upon DeoptimizeIf call). | |
+ if (index < length || (instr->allow_equality() && index == length)) { | |
+ instr->set_llvm_value(nullptr); // TODO(llvm): incorrect if instr has uses | |
+ DCHECK(instr->HasNoUses()); | |
+ return; | |
+ } | |
+ } | |
+ | |
+ llvm::Value* length = Use(instr->length()); | |
+ llvm::Value* index = Use(instr->index()); | |
+ | |
+ // FIXME(llvm): signed comparison makes sense. Or does it? | |
+ auto cc = instr->allow_equality() | |
+ ? llvm::CmpInst::ICMP_SLE : llvm::CmpInst::ICMP_SLT; | |
+ | |
+ llvm::Value* compare = __ CreateICmp(cc, index, length); | |
+ if (FLAG_debug_code && instr->skip_check()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ bool negate = true; | |
+ DeoptimizeIf(compare, Deoptimizer::kOutOfBounds, negate); | |
+ } | |
+ instr->set_llvm_value(compare); | |
+} | |
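+ | |
+// In other words: deoptimize when !(index < length) (or !(index <= length) | |
+// if equality is allowed). The constant-vs-constant case is resolved at | |
+// compile time above, so no stackmap needs to be created for it. | |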
+ | |
+void LLVMChunkBuilder::DoBoundsCheckBaseIndexInformation( | |
+ HBoundsCheckBaseIndexInformation* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::BranchTagged(HBranch* instr, | |
+ ToBooleanStub::Types expected, | |
+ llvm::BasicBlock* true_target, | |
+ llvm::BasicBlock* false_target) { | |
+ llvm::Value* value = Use(instr->value()); | |
+ llvm::Value* value_as_i64 = __ CreateBitOrPointerCast(value, Types::i64); | |
+ | |
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | |
+ | |
+ std::vector<llvm::BasicBlock*> check_blocks; | |
+ for (auto i = ToBooleanStub::UNDEFINED; | |
+ i < ToBooleanStub::NUMBER_OF_TYPES; | |
+ i = static_cast<ToBooleanStub::Type>(i + 1)) { | |
+ if (i == ToBooleanStub::SMI) { | |
+ if (expected.Contains(i)) { | |
+ check_blocks.push_back(NewBlock("BranchTagged Check Block")); | |
+ } else if (expected.NeedsMap()) { | |
+ check_blocks.push_back(NewBlock("BranchTagged NeedsMapCont")); | |
+ } | |
+ if (expected.NeedsMap()) { | |
+ if (!expected.CanBeUndetectable()) | |
+ check_blocks.push_back(NewBlock("BranchTagged NonUndetectable")); | |
+ else | |
+ check_blocks.push_back(NewBlock("BranchTagged CanBeUndetectable")); | |
+ } | |
+ } else if (expected.Contains(i)) { | |
+ check_blocks.push_back(NewBlock("BranchTagged Check Block")); | |
+ } | |
+ } | |
+ llvm::BasicBlock* merge_block = NewBlock("BranchTagged Merge Block"); | |
+ check_blocks.push_back(merge_block); | |
+ | |
+ DCHECK(check_blocks.size() > 1); | |
+ unsigned cur_block = 0; | |
+ llvm::BasicBlock* next = check_blocks[cur_block]; | |
+ __ CreateBr(check_blocks[cur_block]); | |
+ | |
+ if (expected.Contains(ToBooleanStub::UNDEFINED)) { | |
+ __ SetInsertPoint(check_blocks[cur_block]); | |
+ // undefined -> false. | |
+ auto is_undefined = CompareRoot(value, Heap::kUndefinedValueRootIndex); | |
+ __ CreateCondBr(is_undefined, false_target, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ | |
+ if (expected.Contains(ToBooleanStub::BOOLEAN)) { | |
+ __ SetInsertPoint(next); | |
+ // true -> true. | |
+ auto is_true = CompareRoot(value, Heap::kTrueValueRootIndex); | |
+ llvm::BasicBlock* bool_second = NewBlock("BranchTagged Boolean Second Check"); | |
+ __ CreateCondBr(is_true, true_target, bool_second); | |
+ // false -> false. | |
+ __ SetInsertPoint(bool_second); | |
+ auto is_false = CompareRoot(value, Heap::kFalseValueRootIndex); | |
+ __ CreateCondBr(is_false, false_target, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ | |
+ if (expected.Contains(ToBooleanStub::NULL_TYPE)) { | |
+ __ SetInsertPoint(next); | |
+ // 'null' -> false. | |
+ auto is_null = CompareRoot(value, Heap::kNullValueRootIndex); | |
+ __ CreateCondBr(is_null, false_target, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ | |
+ // TODO(llvm): test from here to the end (3d-cube.js, DrawQube). | |
+ if (expected.Contains(ToBooleanStub::SMI)) { | |
+ __ SetInsertPoint(next); | |
+ // Smis: 0 -> false, all other -> true. | |
+ llvm::BasicBlock* not_zero = NewBlock("BranchTagged Smi Non Zero"); | |
+ auto cmp_zero = __ CreateICmpEQ(value_as_i64, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_zero, false_target, not_zero); | |
+ __ SetInsertPoint(not_zero); | |
+ llvm::Value* smi_cond = SmiCheck(value, false); | |
+ __ CreateCondBr(smi_cond, true_target, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } else if (expected.NeedsMap()) { | |
+ // If we need a map later and have a Smi -> deopt. | |
+ // TODO(llvm): not tested (string-fasta, fastaRandom). | |
+ __ SetInsertPoint(next); | |
+ auto smi_and = __ CreateAnd(value_as_i64, __ getInt64(kSmiTagMask)); | |
+ auto is_smi = __ CreateICmpEQ(smi_and, __ getInt64(0)); | |
+ DeoptimizeIf(is_smi, Deoptimizer::kSmi, false, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ | |
+ llvm::Value* map = nullptr; | |
+ if (expected.NeedsMap()) { | |
+ __ SetInsertPoint(next); | |
+ map = LoadFieldOperand(value, HeapObject::kMapOffset); | |
+ if (!expected.CanBeUndetectable()) { | |
+ __ CreateBr(check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } else { | |
+ auto map_bit_offset = LoadFieldOperand(map, Map::kBitFieldOffset); | |
+ auto int_map_bit_offset = __ CreatePtrToInt(map_bit_offset, Types::i64); | |
+ auto map_detach = __ getInt64(1 << Map::kIsUndetectable); | |
+ auto test = __ CreateAnd(int_map_bit_offset, map_detach); | |
+ auto cmp_zero = __ CreateICmpEQ(test, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_zero, check_blocks[++cur_block], false_target); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ } | |
+ | |
+ if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { | |
+ // spec object -> true. | |
+ DCHECK(map); //FIXME: map can be null here | |
+ __ SetInsertPoint(next); | |
+ llvm::Value* cmp_instance = CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE, llvm::CmpInst::ICMP_UGE); | |
+ __ CreateCondBr(cmp_instance, true_target, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ | |
+ if (expected.Contains(ToBooleanStub::STRING)) { | |
+ // String value -> false iff empty. | |
+ DCHECK(map); //FIXME: map can be null here | |
+ __ SetInsertPoint(next); | |
+ llvm::BasicBlock* is_string_bb = NewBlock("BranchTagged ToBoolString IsString"); | |
+ llvm::Value* cmp_instance = CmpInstanceType(map, FIRST_NONSTRING_TYPE, llvm::CmpInst::ICMP_UGE); | |
+ __ CreateCondBr(cmp_instance, check_blocks[++cur_block], is_string_bb); | |
+ __ SetInsertPoint(is_string_bb); | |
+ next = check_blocks[cur_block]; | |
+ auto str_length = LoadFieldOperand(value, String::kLengthOffset); | |
+ auto casted_str_length = __ CreatePtrToInt(str_length, Types::i64); | |
+ auto cmp_length = __ CreateICmpEQ(casted_str_length, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_length, false_target, true_target); | |
+ } | |
+ | |
+ if (expected.Contains(ToBooleanStub::SIMD_VALUE)) { | |
+ // SIMD value -> true. | |
+ DCHECK(map); //FIXME: map can be null here | |
+ __ SetInsertPoint(next); | |
+ llvm::Value* cmp_simd = CmpInstanceType(map, SIMD128_VALUE_TYPE); | |
+ __ CreateCondBr(cmp_simd, true_target, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ | |
+ if (expected.Contains(ToBooleanStub::SYMBOL)) { | |
+ // Symbol value -> true. | |
+ __ SetInsertPoint(next); | |
+ DCHECK(map); //FIXME: map can be null here | |
+ llvm::Value* cmp_instance = CmpInstanceType(map, SYMBOL_TYPE, llvm::CmpInst::ICMP_EQ); | |
+ __ CreateCondBr(cmp_instance, true_target, check_blocks[++cur_block]); | |
+ next = check_blocks[cur_block]; | |
+ } | |
+ | |
+ if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | |
+ // heap number -> false iff +0, -0, or NaN. | |
+ DCHECK(map); //FIXME: map can be null here | |
+ __ SetInsertPoint(next); | |
+ llvm::BasicBlock* is_heap_bb = NewBlock("BranchTagged ToBoolString IsHeapNumber"); | |
+ auto cmp_root = CompareRoot(map, Heap::kHeapNumberMapRootIndex, llvm::CmpInst::ICMP_NE); | |
+ __ CreateCondBr(cmp_root, check_blocks[++cur_block], is_heap_bb); | |
+ __ SetInsertPoint(is_heap_bb); | |
+ next = check_blocks[cur_block]; | |
+ llvm::Value* zero_val = llvm::ConstantFP::get(Types::float64, 0); | |
+ auto value_addr = FieldOperand(value, HeapNumber::kValueOffset); | |
+ llvm::Value* value_as_double_addr = __ CreateBitCast(value_addr, | |
+ Types::ptr_float64); | |
+ auto load_val = __ CreateLoad(value_as_double_addr); | |
+ | |
+ llvm::Value* compare = __ CreateFCmpOEQ(load_val, zero_val); | |
+ __ CreateCondBr(compare, false_target, true_target); | |
+ } | |
+ | |
+ __ SetInsertPoint(next); // TODO(llvm): not sure | |
+ | |
+ if (!expected.IsGeneric()) { | |
+ // We've seen something for the first time -> deopt. | |
+ // This can only happen if we are not generic already. | |
+ auto no_condition = __ getTrue(); | |
+ DeoptimizeIf(no_condition, Deoptimizer::kUnexpectedObject); | |
+ // Since we deoptimize on True the continue block is never reached. | |
+ __ CreateUnreachable(); | |
+ } else { | |
+ // TODO(llvm): not sure | |
+ __ CreateUnreachable(); | |
+ } | |
+} | |
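+ | |
+// The blocks above form a chain of type tests, one per expected type, each | |
+// falling through to the next check; a value of a type not in `expected` | |
+// reaches the end of the chain and deoptimizes with kUnexpectedObject. | |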
+ | |
+llvm::Value* LLVMChunkBuilder::CmpInstanceType(llvm::Value* map, | |
+ InstanceType type, | |
+ llvm::CmpInst::Predicate predicate) { | |
+ llvm::Value* field_operand = LoadFieldOperand(map, Map::kInstanceTypeOffset); | |
+ llvm::Value* int_field_operand = __ CreatePtrToInt(field_operand, Types::i64); | |
+ llvm::Value* int_type = __ getInt64(static_cast<int>(type)); | |
+ llvm::Value* cmp_result = __ CreateICmp(predicate, int_field_operand, | |
+ int_type, "CmpInstanceType"); | |
+ return cmp_result; | |
+} | |
+ | |
+void LLVMChunkBuilder::DoBranch(HBranch* instr) { | |
+ HValue* value = instr->value(); | |
+ llvm::BasicBlock* true_target = Use(instr->SuccessorAt(0)); | |
+ llvm::BasicBlock* false_target = Use(instr->SuccessorAt(1)); | |
+ Representation r = value->representation(); | |
+ HType type = value->type(); | |
+ USE(type); | |
+ if (r.IsInteger32()) { | |
+ llvm::Value* zero = __ getInt32(0); | |
+ llvm::Value* compare = __ CreateICmpNE(Use(value), zero); | |
+ llvm::BranchInst* branch = __ CreateCondBr(compare, | |
+ true_target, false_target); | |
+ instr->set_llvm_value(branch); | |
+ } else if (r.IsSmi()) { | |
+ UNIMPLEMENTED(); | |
+ } else if (r.IsDouble()) { | |
+ llvm::Value* zero = llvm::ConstantFP::get(Types::float64, 0); | |
+ llvm::Value* compare = __ CreateFCmpUNE(Use(value), zero); | |
+ llvm::BranchInst* branch = __ CreateCondBr(compare, | |
+ true_target, false_target); | |
+ instr->set_llvm_value(branch); | |
+ } else { | |
+ DCHECK(r.IsTagged()); | |
+ llvm::Value* value = Use(instr->value()); | |
+ if (type.IsBoolean()) { | |
+ DCHECK(!info()->IsStub()); | |
+ llvm::Value* cmp_root = CompareRoot(value, Heap::kTrueValueRootIndex); | |
+ llvm::BranchInst* branch = __ CreateCondBr(cmp_root, true_target, false_target); | |
+ instr->set_llvm_value(branch); | |
+ } else if (type.IsString()) { | |
+ DCHECK(!info()->IsStub()); | |
+ llvm::Value* zero = __ getInt64(0); | |
+ llvm::Value* length = LoadFieldOperand(value, String::kLengthOffset); | |
+ llvm::Value* casted_length = __ CreatePtrToInt(length, Types::i64); | |
+ llvm::Value* compare = __ CreateICmpNE(casted_length, zero); | |
+ llvm::BranchInst* branch = __ CreateCondBr(compare, true_target, | |
+ false_target); | |
+ instr->set_llvm_value(branch); | |
+ } else if (type.IsSmi() || type.IsJSArray() || type.IsHeapNumber()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ ToBooleanStub::Types expected = instr->expected_input_types(); | |
+ BranchTagged(instr, expected, true_target, false_target); | |
+ } | |
+ } | |
+} | |
+ | |
+llvm::CallingConv::ID LLVMChunkBuilder::GetCallingConv(CallInterfaceDescriptor descriptor) { | |
+ if (descriptor.GetRegisterParameterCount() == 4) { | |
+ if (descriptor.GetRegisterParameter(0).is(rdi) && | |
+ descriptor.GetRegisterParameter(1).is(rbx) && | |
+ descriptor.GetRegisterParameter(2).is(rcx) && | |
+ descriptor.GetRegisterParameter(3).is(rdx)) | |
+ return llvm::CallingConv::X86_64_V8_S1; | |
+ return -1; | |
+ } | |
+ if (descriptor.GetRegisterParameterCount() == 3) { | |
+ //FIXME: // Change CallingConv | |
+ if (descriptor.GetRegisterParameter(0).is(rdi) && | |
+ descriptor.GetRegisterParameter(1).is(rax) && | |
+ descriptor.GetRegisterParameter(2).is(rbx)) | |
+ return llvm::CallingConv::X86_64_V8_S3; | |
+ } | |
+ if (descriptor.GetRegisterParameterCount() == 1) { | |
+ if (descriptor.GetRegisterParameter(0).is(rax)) | |
+ return llvm::CallingConv::X86_64_V8_S11; | |
+ if (descriptor.GetRegisterParameter(0).is(rbx)) | |
+ return llvm::CallingConv::X86_64_V8_S8; | |
+ return -1; | |
+ } | |
+ return -1; | |
+} | |
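+ | |
+// Note: llvm::CallingConv::ID is unsigned, so the -1 above is really ~0u. | |
+// A named sentinel (hypothetical) would make the check in the caller clearer: | |
+//   static const llvm::CallingConv::ID kNoCallingConv = | |
+//       static_cast<llvm::CallingConv::ID>(-1); | |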
+ | |
+void LLVMChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) { | |
+ CallInterfaceDescriptor descriptor = instr->descriptor(); | |
+ llvm::CallingConv::ID conv = GetCallingConv(descriptor); | |
+ // FIXME(llvm): not very good because CallingConv::ID is unsigned. | |
+ if (conv == -1) UNIMPLEMENTED(); | |
+ | |
+ // TODO(llvm): do we need this check here? | |
+ if (descriptor.GetRegisterParameterCount() != instr->OperandCount() - 2) UNIMPLEMENTED(); | |
+ HValue* target = instr->target(); | |
+ // TODO(llvm): how about a zone list? | |
+ std::vector<llvm::Value*> params; | |
+ for (int i = 1; i < instr->OperandCount(); i++) | |
+ params.push_back(Use(instr->OperandAt(i))); | |
+ | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; i--) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ | |
+ if (instr->IsTailCall()) { | |
+ // Well, maybe llvm can grok that it's a tail call. | |
+ // This branch just needs a test. | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ // TODO(llvm):: | |
+ // LPointerMap* pointers = instr->pointer_map(); | |
+ // SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | |
+ | |
+ if (target->IsConstant()) { | |
+ Handle<Object> handle = HConstant::cast(target)->handle(isolate()); | |
+ Handle<Code> code = Handle<Code>::cast(handle); | |
+ // TODO(llvm, gc): reloc info mode of the code (CODE_TARGET)... | |
+ llvm::Value* call = CallCode(code, conv, params); | |
+ instr->set_llvm_value(call); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ // codegen_->RecordSafepoint(pointers_, deopt_mode_); (AfterCall) | |
+ } | |
+ | |
+ // TODO(llvm): MarkAsCall(DefineFixed(result, rax), instr); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoPushArguments(HPushArguments* instr) { | |
+ // Every push must be followed with a call. | |
+ CHECK(pending_pushed_args_.is_empty()); | |
+ for (int i = 0; i < instr->OperandCount(); i++) | |
+ pending_pushed_args_.Add(Use(instr->argument(i)), info()->zone()); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCallJSFunction(HCallJSFunction* instr) { | |
+ // The code that follows relies on this assumption | |
+ // (well, maybe it doesn't; we haven't seen a test case yet). | |
+ if (!instr->function()->IsConstant()) UNIMPLEMENTED(); | |
+ // TODO(llvm): self call | |
+ | |
+ // TODO(llvm): record safepoints... | |
+ auto function_object = Use(instr->function()); // It's an int constant (a ptr) | |
+ auto target_entry = LoadFieldOperand(function_object, | |
+ JSFunction::kCodeEntryOffset, | |
+ "target_entry"); | |
+ auto target_context = LoadFieldOperand(function_object, | |
+ JSFunction::kContextOffset, | |
+ "target_context"); | |
+ | |
+ int actual_arg_count = 4; // rsi, rdi, rbx (OSR), rax | |
+ auto argument_count = instr->argument_count() + actual_arg_count; | |
+ // TODO(llvm): get rid of this. | |
+ // Set up the actual arguments | |
+ std::vector<llvm::Value*> args(argument_count, nullptr); | |
+ args[0] = target_context; | |
+ args[1] = function_object; | |
+ args[2] = __ getInt64(0); | |
+ // FIXME(llvm): this case needs further investigation; do we need a new | |
+ // calling convention here? crypto-aes AESDecryptCtr fails without this. | |
+ args[3] = __ getInt64(instr->argument_count() - 1); | |
+ DCHECK(pending_pushed_args_.length() + actual_arg_count == argument_count); | |
+ // The order is reverse because X86_64_V8 is not implemented quite right. | |
+ for (int i = 0; i < pending_pushed_args_.length(); i++) { | |
+ args[argument_count - 1 - i] = pending_pushed_args_[i]; | |
+ } | |
+ pending_pushed_args_.Clear(); | |
+ | |
+ bool record_safepoint = true; | |
+ auto call = CallVal(target_entry, llvm::CallingConv::X86_64_V8_E, args, | |
+ Types::tagged, record_safepoint); | |
+ instr->set_llvm_value(call); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCallFunction(HCallFunction* instr) { | |
+ int arity = instr->argument_count() - 1; | |
+ CallFunctionFlags flags = instr->function_flags(); | |
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* function = Use(instr->function()); | |
+ llvm::Value* result = nullptr; | |
+ | |
+ if (instr->HasVectorAndSlot()) { | |
+ AllowDeferredHandleDereference vector_structure_check; | |
+ AllowHandleAllocation allow_handles; | |
+ Handle<TypeFeedbackVector> feedback_vector = instr->feedback_vector(); | |
+ int index = feedback_vector->GetIndex(instr->slot()); | |
+ | |
+ CallICState::CallType call_type = | |
+ (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION; | |
+ | |
+ Handle<Code> ic = | |
+ CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code(); | |
+ llvm::Value* vector = MoveHeapObject(feedback_vector); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(function); | |
+ params.push_back(vector); | |
+ Smi* smi_index = Smi::FromInt(index); | |
+ params.push_back(ValueFromSmi(smi_index)); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ result = CallCode(ic, llvm::CallingConv::X86_64_V8_S6, | |
+ params); | |
+ } else { | |
+ CallFunctionStub stub(isolate(), arity, flags); | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(function); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ result = CallCode(stub.GetCode(), llvm::CallingConv::X86_64_V8_S13, params); | |
+ } | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCallNew(HCallNew* instr) { | |
+ // TODO(llvm): not tested. | |
+ // FIXME(llvm): don't we need pending_pushed_args_ here? | |
+ int arity = instr->argument_count() - 1; | |
+ llvm::Value* arity_val = __ getInt64(arity); | |
+ llvm::Value* load_r = LoadRoot(Heap::kUndefinedValueRootIndex); | |
+ CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ } | |
+ std::vector<llvm::Value*> params; | |
+ for (int i = 0; i < instr->OperandCount(); ++i) | |
+ params.push_back(Use(instr->OperandAt(i))); | |
+ params.push_back(arity_val); | |
+ params.push_back(load_r); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ llvm::Value* call = CallCode(code, llvm::CallingConv::X86_64_V8_S3, params); | |
+ instr->set_llvm_value(call); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCallNewArray(HCallNewArray* instr) { | |
+ // TODO(llvm): respect RelocInfo. | |
+ int arity = instr->argument_count() - 1; | |
+ llvm::Value* arity_val = __ getInt64(arity); | |
+ llvm::Value* result_packed_elem = nullptr; | |
+ llvm::BasicBlock* packed_continue = nullptr; | |
+ llvm::Value* load_root = LoadRoot(Heap::kUndefinedValueRootIndex); | |
+ ElementsKind kind = instr->elements_kind(); | |
+ AllocationSiteOverrideMode override_mode = | |
+ (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) | |
+ ? DISABLE_ALLOCATION_SITES | |
+ : DONT_OVERRIDE; | |
+ if (arity == 0) { | |
+ UNIMPLEMENTED(); | |
+ } else if (arity == 1) { | |
+ llvm::BasicBlock* done = nullptr; | |
+ llvm::BasicBlock* packed_case = NewBlock("CALL NEW ARRAY PACKED CASE"); | |
+ load_root = MoveHeapObject(instr->site()); | |
+ if (IsFastPackedElementsKind(kind)) { | |
+ packed_continue = NewBlock("CALL NEW ARRAY PACKED CASE CONTINUE"); | |
+ DCHECK_GE(pending_pushed_args_.length(), 1); | |
+ llvm::Value* first_arg = pending_pushed_args_[0]; | |
+ first_arg = __ CreateBitOrPointerCast(first_arg, Types::i64); | |
+ llvm::Value* cmp_eq = __ CreateICmpEQ(first_arg, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_eq, packed_case, packed_continue); | |
+ __ SetInsertPoint(packed_continue); | |
+ ElementsKind holey_kind = GetHoleyElementsKind(kind); | |
+ ArraySingleArgumentConstructorStub stub(isolate(), | |
+ holey_kind, | |
+ override_mode); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->context())); | |
+ for (int i = 1; i < instr->OperandCount(); ++i) | |
+ params.push_back(Use(instr->OperandAt(i))); | |
+ params.push_back(arity_val); | |
+ params.push_back(load_root); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ result_packed_elem = CallCode(code, llvm::CallingConv::X86_64_V8_S3, | |
+ params); | |
+ done = NewBlock("CALL NEW ARRAY END"); | |
+ __ CreateBr(done); | |
+ } else { | |
+ done = NewBlock("CALL NEW ARRAY END"); | |
+ __ CreateBr(packed_case); | |
+ } | |
+ //__ CreateBr(packed_case); | |
+ __ SetInsertPoint(packed_case); | |
+ ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->context())); | |
+ for (int i = 1; i < instr->OperandCount(); ++i) | |
+ params.push_back(Use(instr->OperandAt(i))); | |
+ params.push_back(arity_val); | |
+ params.push_back(load_root); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ llvm::Value* return_val = CallCode(code, llvm::CallingConv::X86_64_V8_S3, | |
+ params); | |
+ __ CreateBr(done); | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, result_packed_elem ? 2 : 1); | |
+ phi->addIncoming(return_val, packed_case); | |
+ if (result_packed_elem) { | |
+ DCHECK(packed_continue); | |
+ phi->addIncoming(result_packed_elem, packed_continue); | |
+ } | |
+ instr->set_llvm_value(phi); | |
+ } else { | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->context())); | |
+ params.push_back(Use(instr->constructor())); | |
+ params.push_back(arity_val); | |
+ params.push_back(load_root); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ | |
+ ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ } | |
+ //DCHECK(code) | |
+ auto result = CallCode(code, llvm::CallingConv::X86_64_V8_S3, params); | |
+ instr->set_llvm_value(result); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCallRuntime(HCallRuntime* instr) { | |
+ // FIXME(llvm): use instr->save_doubles() | |
+ llvm::Value* val = CallRuntime(instr->function()); | |
+ llvm::Value* tagged_val = __ CreateBitOrPointerCast(val, Types::tagged); | |
+ instr->set_llvm_value(tagged_val); | |
+ // MarkAsCall | |
+ // RecordSafepointWithLazyDeopt | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCallStub(HCallStub* instr) { | |
+ llvm::Value* context = Use(instr->context()); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ switch (instr->major_key()) { | |
+ case CodeStub::RegExpExec: { | |
+ RegExpExecStub stub(isolate()); | |
+ AllowHandleAllocation allow_handle; | |
+ llvm::Value* call = CallCode(stub.GetCode(), | |
+ llvm::CallingConv::X86_64_V8_Stub, | |
+ params); | |
+ llvm::Value* result = __ CreatePtrToInt(call, Types::i64); | |
+ instr->set_llvm_value(result); | |
+ break; | |
+ } | |
+ case CodeStub::SubString: { | |
+ SubStringStub stub(isolate()); | |
+ AllowHandleAllocation allow_handle; | |
+ llvm::Value* call = CallCode(stub.GetCode(), | |
+ llvm::CallingConv::X86_64_V8_Stub, | |
+ params); | |
+ instr->set_llvm_value(call); | |
+ break; | |
+ } | |
+ default: | |
+ UNREACHABLE(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCapturedObject(HCapturedObject* instr) { | |
+ instr->ReplayEnvironment(current_block_->last_environment()); | |
+ // There are no real uses of a captured object. | |
+} | |
+ | |
+void LLVMChunkBuilder::ChangeDoubleToTagged(HValue* val, HChange* instr) { | |
+ // TODO(llvm): this case in Crankshaft utilizes deferred calling. | |
+ llvm::Value* new_heap_number = nullptr; | |
+ DCHECK(Use(val)->getType()->isDoubleTy()); | |
+ if (FLAG_inline_new) | |
+ new_heap_number = AllocateHeapNumber(); | |
+ else | |
+ new_heap_number = AllocateHeapNumberSlow(); // i8* | |
+ | |
+ llvm::Value* store_address = FieldOperand(new_heap_number, | |
+ HeapNumber::kValueOffset); | |
+ llvm::Value* casted_address = __ CreateBitCast(store_address, | |
+ Types::ptr_float64); | |
+ | |
+ // [(i8*)new_heap_number + offset] = val; | |
+ __ CreateStore(Use(val), casted_address); | |
+ instr->set_llvm_value(new_heap_number); // no offset | |
+ | |
+ // TODO(llvm): AssignPointerMap(Define(result, result_temp)); | |
+} | |
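+ | |
+// Boxing a double, in pseudo-C (FieldOperand subtracts kHeapObjectTag): | |
+//   HeapNumber* n = FLAG_inline_new ? AllocateHeapNumber()      // fast path | |
+//                                   : AllocateHeapNumberSlow(); // runtime | |
+//   *(double*)((char*)n - kHeapObjectTag + HeapNumber::kValueOffset) = val; | |
+//   return n;  // the result stays tagged | |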
+ | |
+llvm::Value* LLVMChunkBuilder::LoadRoot(Heap::RootListIndex index) { | |
+ Address root_array_start_address = | |
+ ExternalReference::roots_array_start(isolate()).address(); | |
+ // TODO(llvm): Move(RelocInfo::EXTERNAL_REFERENCE) | |
+ auto int64_address = | |
+ __ getInt64(reinterpret_cast<uint64_t>(root_array_start_address)); | |
+ auto address = __ CreateBitOrPointerCast(int64_address, Types::ptr_tagged); | |
+ int offset = index << kPointerSizeLog2; | |
+ auto load_address = ConstructAddress(address, offset); | |
+ return __ CreateLoad(load_address); | |
+} | |
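+ | |
+// Equivalent pseudo-C: root = ((Object**)roots_array_start)[index]; | |
+// the scaling is done via index << kPointerSizeLog2. | |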
+ | |
+llvm::Value* LLVMChunkBuilder::CompareRoot(llvm::Value* operand, | |
+ Heap::RootListIndex index, | |
+ llvm::CmpInst::Predicate predicate) { | |
+ llvm::Value* root_value_by_index = LoadRoot(index); | |
+ llvm::Value* cmp_result = __ CreateICmp(predicate, operand, | |
+ root_value_by_index, "CompareRoot"); | |
+ return cmp_result; | |
+} | |
+ | |
+void LLVMChunkBuilder::ChangeDoubleToI(HValue* val, HChange* instr) { | |
+ if (instr->CanTruncateToInt32()) { | |
+ llvm::Value* casted_int = __ CreateFPToSI(Use(val), Types::i64); | |
+ // FIXME: Figure out why we need this step. Fix for bitops-nsieve-bits | |
+ auto result = __ CreateTruncOrBitCast(casted_int, Types::i32); | |
+ instr->set_llvm_value(result); | |
+ // TODO(llvm): handle the overflow case. | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::ChangeTaggedToDouble(HValue* val, HChange* instr) { | |
+ bool can_convert_undefined_to_nan = | |
+ instr->can_convert_undefined_to_nan(); | |
+ | |
+ bool deoptimize_on_minus_zero = instr->deoptimize_on_minus_zero(); | |
+ | |
+ llvm::BasicBlock* is_smi = NewBlock("NUMBER_CANDIDATE_IS_SMI"); | |
+ llvm::BasicBlock* is_any_tagged = NewBlock("NUMBER_CANDIDATE_IS_ANY_TAGGED"); | |
+ llvm::BasicBlock* merge_block = NewBlock( | |
+ std::string("ChangeTaggedToDouble Merge ") + std::to_string(instr->id())); | |
+ | |
+ llvm::Value* is_heap_number = nullptr; | |
+ llvm::Value* loaded_double_value = nullptr; | |
+ llvm::Value* nan_value = nullptr; | |
+ llvm::Value* llvm_val = Use(val); | |
+ llvm::Value* cond = SmiCheck(llvm_val); | |
+ llvm::BasicBlock* conversion_end = nullptr; | |
+ | |
+ if (!val->representation().IsSmi()) { | |
+ __ CreateCondBr(cond, is_smi, is_any_tagged); | |
+ __ SetInsertPoint(is_any_tagged); | |
+ | |
+ llvm::Value* vals_map = LoadFieldOperand(llvm_val, HeapObject::kMapOffset); | |
+ is_heap_number = CompareRoot(vals_map, Heap::kHeapNumberMapRootIndex); | |
+ | |
+ llvm::Value* value_addr = FieldOperand(llvm_val, HeapNumber::kValueOffset); | |
+ llvm::Value* value_as_double_addr = __ CreateBitCast(value_addr, | |
+ Types::ptr_float64); | |
+ | |
+ // On x64 it is safe to load at heap number offset before evaluating the map | |
+ // check, since all heap objects are at least two words long. | |
+ loaded_double_value = __ CreateLoad(value_as_double_addr); | |
+ | |
+ if (can_convert_undefined_to_nan) { | |
+ auto conversion_start = NewBlock("can_convert_undefined_to_nan " | |
+ "conversion_start"); | |
+ __ CreateCondBr(is_heap_number, merge_block, conversion_start); | |
+ | |
+ __ SetInsertPoint(conversion_start); | |
+ auto is_undefined = CompareRoot(llvm_val, Heap::kUndefinedValueRootIndex); | |
+ conversion_end = NewBlock("can_convert_undefined_to_nan: getting NaN"); | |
+ bool deopt_on_not_undefined = true; | |
+ DeoptimizeIf(is_undefined, Deoptimizer::kNotAHeapNumberUndefined, | |
+ deopt_on_not_undefined, conversion_end); | |
+ nan_value = GetNan(); | |
+ __ CreateBr(merge_block); | |
+ } else { | |
+ bool deopt_on_not_equal = true; | |
+ DeoptimizeIf(is_heap_number, Deoptimizer::kNotAHeapNumber, | |
+ deopt_on_not_equal, merge_block); | |
+ } | |
+ | |
+ if (deoptimize_on_minus_zero) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } | |
+ | |
+ __ SetInsertPoint(is_smi); | |
+ auto int32_val = SmiToInteger32(val); | |
+ auto double_val_from_smi = __ CreateSIToFP(int32_val, Types::float64); | |
+ __ CreateBr(merge_block); | |
+ | |
+ __ SetInsertPoint(merge_block); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::float64, | |
+ 2 + can_convert_undefined_to_nan); | |
+ phi->addIncoming(loaded_double_value, is_any_tagged); | |
+ phi->addIncoming(double_val_from_smi, is_smi); | |
+ if (can_convert_undefined_to_nan) phi->addIncoming(nan_value, conversion_end); | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::ChangeTaggedToISlow(HValue* val, HChange* instr) { | |
+ llvm::Value* cond = SmiCheck(Use(val)); | |
+ | |
+ llvm::BasicBlock* is_smi = NewBlock("is Smi fast case"); | |
+ llvm::BasicBlock* not_smi = NewBlock("is smi 'deferred' case"); | |
+ llvm::BasicBlock* zero_block = nullptr; | |
+ llvm::BasicBlock* merge_and_ret = NewBlock( | |
+ std::string("merge and ret ") + std::to_string(instr->id())); | |
+ llvm::BasicBlock* not_smi_merge = nullptr; | |
+ | |
+ __ CreateCondBr(cond, is_smi, not_smi); | |
+ | |
+ __ SetInsertPoint(is_smi); | |
+ llvm::Value* result_for_smi = SmiToInteger32(val); | |
+ __ CreateBr(merge_and_ret); | |
+ | |
+ __ SetInsertPoint(not_smi); | |
+ llvm::Value* result_for_not_smi = nullptr; | |
+ llvm::Value* minus_zero_result = nullptr; | |
+ bool truncating = instr->CanTruncateToInt32(); | |
+ | |
+ llvm::Value* vals_map = LoadFieldOperand(Use(val), HeapObject::kMapOffset); | |
+ llvm::Value* cmp = CompareRoot(vals_map, Heap::kHeapNumberMapRootIndex, | |
+ llvm::CmpInst::ICMP_NE); | |
+ | |
+ if (truncating) { | |
+ llvm::BasicBlock* truncate_heap_number = NewBlock("TruncateHeapNumberToI"); | |
+ llvm::BasicBlock* no_heap_number = NewBlock("Not a heap number"); | |
+ llvm::BasicBlock* merge_inner = NewBlock("inner merge"); | |
+ | |
+ __ CreateCondBr(cmp, no_heap_number, truncate_heap_number); | |
+ | |
+ __ SetInsertPoint(truncate_heap_number); | |
+ llvm::Value* value_addr = FieldOperand(Use(val), HeapNumber::kValueOffset); | |
+ // cast to ptr to double, fetch the double and convert to i32 | |
+ llvm::Value* double_addr = __ CreateBitCast(value_addr, Types::ptr_float64); | |
+ llvm::Value* double_val = __ CreateLoad(double_addr); | |
+ llvm::Value* truncate_heap_number_result = __ CreateFPToSI(double_val, | |
+ Types::i32); | |
+ | |
+ // TruncateHeapNumberToI | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration( | |
+ module_.get(), llvm::Intrinsic::ssub_with_overflow, Types::i32); | |
+ llvm::Value* params[] = { __ getInt32(1), truncate_heap_number_result }; | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ llvm::Value* overflow = __ CreateExtractValue(call, 1); | |
+ llvm::BasicBlock* slow_case = NewBlock("ChangeTaggedToISlow slow case"); | |
+ llvm::BasicBlock* done = NewBlock("ChangeTaggedToISlow done"); | |
+ __ CreateCondBr(overflow, slow_case, done); | |
+ | |
+ __ SetInsertPoint(slow_case); | |
+ Register input_reg = rbx; | |
+ Register result_reg = rax; | |
+ int offset = HeapNumber::kValueOffset - kHeapObjectTag; | |
+ DoubleToIStub stub(isolate(), input_reg, result_reg, offset, true); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap_alloc; | |
+ code = stub.GetCode(); | |
+ } | |
+ Assert(__ getFalse()); // FIXME(llvm): this case is not tested. | |
+ llvm::Value* fictive_val = __ getInt32(0); // Dummy value. | |
+ std::vector<llvm::Value*> args = {fictive_val, truncate_heap_number_result}; | |
+ llvm::Value* result_intrinsic = CallCode(code, | |
+ llvm::CallingConv::X86_64_V8_S12, | |
+ args); | |
+ llvm::Value* casted_result_intrinsic = __ CreatePtrToInt(result_intrinsic, | |
+ Types::i32); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi_done = __ CreatePHI(Types::i32, 2); | |
+ phi_done->addIncoming(casted_result_intrinsic, slow_case); | |
+ phi_done->addIncoming(truncate_heap_number_result, truncate_heap_number); | |
+ __ CreateBr(merge_inner); | |
+ | |
+ __ SetInsertPoint(no_heap_number); | |
+ // Check for Oddballs. Undefined/False is converted to zero and True to one | |
+ // for truncating conversions. | |
+ llvm::BasicBlock* check_bools = NewBlock("ChangeTaggedToISlow check_bool"); | |
+ llvm::BasicBlock* no_check_bools = NewBlock("ChangeTaggedToISlow" | |
+ " no_check_bools"); | |
+ llvm::Value* cmp_undefined = CompareRoot(Use(val), | |
+ Heap::kUndefinedValueRootIndex, | |
+ llvm::CmpInst::ICMP_NE); | |
+ __ CreateCondBr(cmp_undefined, check_bools, no_check_bools); | |
+ __ SetInsertPoint(no_check_bools); | |
+ llvm::Value* result_no_check_bools = __ getInt32(0); | |
+ __ CreateBr(merge_inner); | |
+ | |
+ __ SetInsertPoint(check_bools); | |
+ llvm::BasicBlock* check_true = NewBlock("ChangeTaggedToISlow check_true"); | |
+ llvm::BasicBlock* check_false = NewBlock("ChangeTaggedToISlow check_false"); | |
+ llvm::Value* cmp_true = CompareRoot(Use(val), Heap::kTrueValueRootIndex, | |
+ llvm::CmpInst::ICMP_NE); | |
+ __ CreateCondBr(cmp_true, check_false, check_true); | |
+ | |
+ __ SetInsertPoint(check_true); | |
+ llvm::Value* result_check_true = __ getInt32(1); | |
+ __ CreateBr(merge_inner); | |
+ | |
+ __ SetInsertPoint(check_false); | |
+ llvm::Value* cmp_false = CompareRoot(Use(val), Heap::kFalseValueRootIndex, | |
+ llvm::CmpInst::ICMP_NE); | |
+ DeoptimizeIf(cmp_false, Deoptimizer::kNotAHeapNumberUndefinedBoolean); | |
+ llvm::Value* result_check_false = __ getInt32(0); | |
+ __ CreateBr(merge_inner); | |
+ | |
+ __ SetInsertPoint(merge_inner); | |
+ llvm::PHINode* phi_inner = __ CreatePHI(Types::i32, 4); | |
+ phi_inner->addIncoming(result_no_check_bools, no_check_bools); | |
+ phi_inner->addIncoming(result_check_true, check_true); | |
+ phi_inner->addIncoming(result_check_false, check_false); | |
+ phi_inner->addIncoming(phi_done, done); | |
+ result_for_not_smi = phi_inner; | |
+ not_smi_merge = merge_inner; | |
+ } else { | |
+ // The comparison is already done with NE, so no need for negation here. | |
+ DeoptimizeIf(cmp, Deoptimizer::kNotAHeapNumber); | |
+ | |
+ auto address = FieldOperand(Use(val), HeapNumber::kValueOffset); | |
+ auto double_addr = __ CreateBitCast(address, Types::ptr_float64); | |
+ auto double_val = __ CreateLoad(double_addr); | |
+ // Convert the double to int32; convert it back to double and | |
+ // see if the two doubles are equal and neither is a NaN. | |
+ // If not, deopt (kLostPrecision or kNaN). | |
+ result_for_not_smi = __ CreateFPToSI(double_val, Types::i32); | |
+ auto converted_double = __ CreateSIToFP(result_for_not_smi, Types::float64); | |
+ auto ordered_and_equal = __ CreateFCmpOEQ(double_val, converted_double); | |
+ bool negate = true; | |
+ // TODO(llvm): in case they are unordered or equal, reason should be | |
+ // kLostPrecision. | |
+ DeoptimizeIf(ordered_and_equal, Deoptimizer::kNaN, negate); | |
+ if (instr->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { | |
+ zero_block = NewBlock("TaggedToISlow ZERO"); | |
+ auto equal_ = __ CreateICmpEQ(result_for_not_smi, __ getInt32(0)); | |
+ __ CreateCondBr(equal_, zero_block, merge_and_ret); | |
+ __ SetInsertPoint(zero_block); | |
+ InsertDebugTrap(); | |
+ minus_zero_result = __ getInt32(0); | |
+ // __ CreateBr(not_smi_merge); | |
+ /*llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::x86_sse2_movmsk_pd); | |
+ llvm::Value* param_vect = __ CreateVectorSplat(2, input_val); | |
+ __ CreateInsertElement(param_vect, double_val, __ getInt32(0)); | |
+ llvm::Value* call = __ CreateCall(intrinsic, param_vect); | |
+ __ CreateAnd(call, __ getInt32(1)); //FIXME(llvm)://Possibly wrong | |
+ auto is_zero = __ CreateICmpEQ(call, __ getInt64(0)); | |
+ DeoptimizeIf(is_zero, false, merge_and_ret); */ | |
+ } | |
+ not_smi_merge = __ GetInsertBlock(); | |
+ } | |
+ __ CreateBr(merge_and_ret); | |
+ | |
+ __ SetInsertPoint(merge_and_ret); | |
+ llvm::PHINode* phi = nullptr; | |
+ if (instr->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) { | |
+ phi = __ CreatePHI(Types::i32, 3); | |
+ phi->addIncoming(minus_zero_result, zero_block); | |
+ phi->addIncoming(result_for_smi, is_smi); | |
+ phi->addIncoming(result_for_not_smi, not_smi_merge); | |
+ } else { | |
+ phi = __ CreatePHI(Types::i32, 2); | |
+ phi->addIncoming(result_for_smi, is_smi); | |
+ phi->addIncoming(result_for_not_smi, not_smi_merge); | |
+ } | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoChange(HChange* instr) { | |
+ Representation from = instr->from(); | |
+ Representation to = instr->to(); | |
+ HValue* val = instr->value(); | |
+ if (from.IsSmi()) { | |
+ if (to.IsTagged()) { | |
+ auto as_tagged = __ CreateBitOrPointerCast(Use(val), Types::tagged); | |
+ instr->set_llvm_value(as_tagged); | |
+ return; | |
+ } | |
+ from = Representation::Tagged(); | |
+ } | |
+ if (from.IsTagged()) { | |
+ if (to.IsDouble()) { | |
+ ChangeTaggedToDouble(val, instr); | |
+ } else if (to.IsSmi()) { | |
+ if (!val->type().IsSmi()) { | |
+ bool not_smi = true; | |
+ llvm::Value* cond = SmiCheck(Use(val), not_smi); | |
+ DeoptimizeIf(cond, Deoptimizer::kNotASmi); | |
+ } | |
+ auto val_as_smi = __ CreateBitOrPointerCast(Use(val), Types::smi); | |
+ instr->set_llvm_value(val_as_smi); | |
+ } else { | |
+ DCHECK(to.IsInteger32()); | |
+ if (val->type().IsSmi() || val->representation().IsSmi()) { | |
+ // convert smi to int32, no need to perform smi check | |
+ // lithium codegen does __ AssertSmi(input) | |
+ instr->set_llvm_value(SmiToInteger32(val)); | |
+ } else { | |
+ ChangeTaggedToISlow(val, instr); | |
+ } | |
+ } | |
+ } else if (from.IsDouble()) { | |
+ if (to.IsInteger32()) { | |
+ ChangeDoubleToI(val, instr); | |
+ } else if (to.IsTagged()) { | |
+ ChangeDoubleToTagged(val, instr); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } else if (from.IsInteger32()) { | |
+ if (to.IsTagged()) { | |
+ if (!instr->CheckFlag(HValue::kCanOverflow)) { | |
+ auto smi_as_tagged = __ CreateBitOrPointerCast(Integer32ToSmi(val), | |
+ Types::tagged); | |
+ instr->set_llvm_value(smi_as_tagged); | |
+ } else if (instr->value()->CheckFlag(HInstruction::kUint32)) { | |
+ DoNumberTagU(instr); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } else if (to.IsSmi()) { | |
+ // TODO(llvm): not tested. | |
+ if (instr->CheckFlag(HValue::kCanOverflow) && | |
+ instr->value()->CheckFlag(HValue::kUint32)) { | |
+ llvm::Value* cmp = nullptr; | |
+ if (SmiValuesAre32Bits()) { | |
+ // This checks whether the high (sign) bit is set; | |
+ // if it is, the uint32 doesn't fit in a Smi. | |
+ cmp = __ CreateICmpSLT(Use(val), __ getInt32(0)); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ DCHECK(SmiValuesAre31Bits()); | |
+ } | |
+ DeoptimizeIf(cmp, Deoptimizer::kOverflow); | |
+ // UNIMPLEMENTED(); | |
+ } | |
+ llvm::Value* result = Integer32ToSmi(val); | |
+ instr->set_llvm_value(result); | |
+ if (instr->CheckFlag(HValue::kCanOverflow) && | |
+ !instr->value()->CheckFlag(HValue::kUint32)) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } else { | |
+ DCHECK(to.IsDouble()); | |
+ llvm::Value* double_val = __ CreateSIToFP(Use(val), Types::float64); | |
+ instr->set_llvm_value(double_val); | |
+ //UNIMPLEMENTED(); | |
+ } | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoNumberTagU(HChange* instr) { | |
+ llvm::Value* val = Use(instr->value()); | |
+ llvm::Value* above_max = __ CreateICmpUGT(val, __ getInt32(Smi::kMaxValue)); | |
+ llvm::BasicBlock* deferred = NewBlock("IntToTag Deferred entry"); | |
+ llvm::BasicBlock* is_valid_smi = NewBlock("IntToTag Continue"); | |
+ llvm::BasicBlock* done = NewBlock("IntToTag Done"); | |
+ __ CreateCondBr(above_max, deferred, is_valid_smi); | |
+ | |
+ __ SetInsertPoint(is_valid_smi); | |
+ auto smi_result = Integer32ToSmi(val); | |
+ auto smi_result_tagged = __ CreateBitOrPointerCast(smi_result, Types::tagged); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(deferred); | |
+ auto number_as_double = __ CreateUIToFP(val, Types::float64); | |
+ llvm::Value* new_heap_number = nullptr; | |
+ // FIXME(llvm): we do not provide gc_required label... | |
+ if (FLAG_inline_new) { | |
+ new_heap_number = AllocateHeapNumber(); | |
+ auto double_addr = FieldOperand(new_heap_number, HeapNumber::kValueOffset); | |
+ double_addr = __ CreateBitOrPointerCast(double_addr, Types::ptr_float64); | |
+ __ CreateStore(number_as_double, double_addr); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(smi_result_tagged, is_valid_smi); | |
+ phi->addIncoming(new_heap_number, deferred); | |
+ instr->set_llvm_value(phi); | |
+} | |
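+ | |
+// For reference (x64 with 32-bit Smi payloads): Integer32ToSmi is just | |
+//   smi = (int64_t)value << kSmiShift;  // kSmiShift == 32 | |
+// so a uint32 above Smi::kMaxValue (2^31 - 1) cannot be tagged directly and | |
+// must take the deferred HeapNumber path above. | |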
+ | |
+void LLVMChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) { | |
+ if (!instr->value()->type().IsHeapObject()) { | |
+ llvm::Value* is_smi = SmiCheck(Use(instr->value())); | |
+ DeoptimizeIf(is_smi, Deoptimizer::kSmi); | |
+ } | |
+} | |
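+ | |
+// SmiCheck tests the low tag bit: a Smi has (value & kSmiTagMask) == kSmiTag | |
+// (i.e. 0), while heap object pointers carry kHeapObjectTag == 1. | |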
+ | |
+void LLVMChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { | |
+ llvm::Value* value = LoadFieldOperand(Use(instr->value()), | |
+ HeapObject::kMapOffset); | |
+ if (instr->is_interval_check()) { | |
+ InstanceType first; | |
+ InstanceType last; | |
+ instr->GetCheckInterval(&first, &last); | |
+ | |
+ llvm::Value* instance = LoadFieldOperand(value, Map::kInstanceTypeOffset); | |
+ llvm::Value* imm_first = __ getInt64(static_cast<int>(first)); | |
+ | |
+ // If there is only one type in the interval check for equality. | |
+ if (first == last) { | |
+ llvm::Value* cmp = __ CreateICmpNE(instance, imm_first); | |
+ DeoptimizeIf(cmp, Deoptimizer::kWrongInstanceType); | |
+ } else { | |
+ llvm::Value* cmp = __ CreateICmpULT(instance, imm_first); | |
+ DeoptimizeIf(cmp, Deoptimizer::kWrongInstanceType); | |
+ // Omit check for the last type. | |
+ if (last != LAST_TYPE) { | |
+ llvm::Value* imm_last = __ getInt64(static_cast<int>(last)); | |
+ llvm::Value* cmp = __ CreateICmpUGT(instance, imm_last); | |
+ DeoptimizeIf(cmp, Deoptimizer::kWrongInstanceType); | |
+ } | |
+ } | |
+ } else { | |
+ uint8_t mask; | |
+ uint8_t tag; | |
+ instr->GetCheckMaskAndTag(&mask, &tag); | |
+ | |
+ if (base::bits::IsPowerOfTwo32(mask)) { | |
+ llvm::Value* addr = FieldOperand(value , Map::kInstanceTypeOffset); | |
+ llvm::Value* cast_to_int = __ CreateBitCast(addr, Types::ptr_i64); | |
+ llvm::Value* val = __ CreateLoad(cast_to_int); | |
+ llvm::Value* test = __ CreateAnd(val, __ getInt64(mask)); | |
+ llvm::Value* cmp = nullptr; | |
+ if (tag == 0) { | |
+ cmp = __ CreateICmpNE(test, __ getInt64(0)); | |
+ } else { | |
+ cmp = __ CreateICmpEQ(test, __ getInt64(0)); | |
+ } | |
+ DeoptimizeIf(cmp, Deoptimizer::kWrongInstanceType); | |
+ } else { | |
+ // TODO(llvm): not tested (fails for string-tagcloud.js in function "", | |
+ // and for date-format-tofte.js in arrayExists). | |
+ llvm::Value* instance_offset = LoadFieldOperand(value, | |
+ Map::kInstanceTypeOffset); | |
+ | |
+ llvm::Value* casted_offset = __ CreatePtrToInt(instance_offset, Types::i64); | |
+ llvm::Value* and_value = __ CreateAnd(casted_offset, __ getInt64(0x000000ff)); | |
+ llvm::Value* and_mask = __ CreateAnd(and_value, __ getInt64(mask)); | |
+ llvm::Value* cmp = __ CreateICmpEQ(and_mask, __ getInt64(tag)); | |
+ DeoptimizeIf(cmp, Deoptimizer::kWrongInstanceType, true); | |
+ } | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::Retry(BailoutReason reason) { | |
+ info()->RetryOptimization(reason); | |
+ status_ = ABORTED; | |
+} | |
+ | |
+void LLVMChunkBuilder::AddStabilityDependency(Handle<Map> map) { | |
+ if (!map->is_stable()) return Retry(kMapBecameUnstable); | |
+ chunk()->AddStabilityDependency(map); | |
+ // TODO(llvm): stability_dependencies_ unused yet | |
+} | |
+ | |
+void LLVMChunkBuilder::AddDeprecationDependency(Handle<Map> map) { | |
+ if (map->is_deprecated()) return Retry(kMapBecameDeprecated); | |
+ chunk()->AddDeprecationDependency(map); | |
+ // TODO(llvm): stability_dependencies_ unused yet | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCheckMaps(HCheckMaps* instr) { | |
+ if (instr->IsStabilityCheck()) { | |
+ const UniqueSet<Map>* maps = instr->maps(); | |
+ for (int i = 0; i < maps->size(); ++i) { | |
+ AddStabilityDependency(maps->at(i).handle()); | |
+ } | |
+ return; | |
+ } | |
+ DCHECK(instr->HasNoUses()); | |
+ llvm::Value* val = Use(instr->value()); | |
+ llvm::BasicBlock* success = NewBlock("CheckMaps success"); | |
+ std::vector<llvm::BasicBlock*> check_blocks; | |
+ const UniqueSet<Map>* maps = instr->maps(); | |
+ for (int i = 0; i < maps->size(); i++) | |
+ check_blocks.push_back(NewBlock("CheckMap")); | |
+ DCHECK(maps->size() > 0); | |
+ __ CreateBr(check_blocks[0]); | |
+ for (int i = 0; i < maps->size() - 1; i++) { | |
+ Handle<Map> map = maps->at(i).handle(); | |
+ __ SetInsertPoint(check_blocks[i]); | |
+ llvm::Value* compare = CompareMap(val, map); | |
+ __ CreateCondBr(compare, success, check_blocks[i + 1]); | |
+ } | |
+ __ SetInsertPoint(check_blocks[maps->size() - 1]); | |
+ llvm::Value* compare = CompareMap(val, maps->at(maps->size() - 1).handle()); | |
+ if (instr->HasMigrationTarget()) { | |
+ // Call deferred. | |
+ bool deopt_on_equal = false; | |
+ llvm::BasicBlock* defered_block = NewBlock("CheckMaps deferred"); | |
+ __ CreateCondBr(compare, success, defered_block); | |
+ __ SetInsertPoint(defered_block); | |
+ DCHECK(pending_pushed_args_.is_empty()); | |
+ pending_pushed_args_.Add(Use(instr->value()), info()->zone()); | |
+ llvm::Value* result = CallRuntimeViaId(Runtime::kTryMigrateInstance); | |
+ llvm::Value* casted = __ CreateBitOrPointerCast(result, Types::i64); | |
+ llvm::Value* and_result = __ CreateAnd(casted, __ getInt64(kSmiTagMask)); | |
+ llvm::Value* compare_result = __ CreateICmpEQ(and_result, __ getInt64(0)); | |
+ DeoptimizeIf(compare_result, Deoptimizer::kInstanceMigrationFailed, | |
+ deopt_on_equal, success); | |
+ pending_pushed_args_.Clear(); | |
+    // Keep the success BB as the current insert point so it doesn't go stray. | 
+ } else { | |
+ bool deopt_on_not_equal = true; | |
+ DeoptimizeIf(compare, Deoptimizer::kWrongMap, deopt_on_not_equal, success); | |
+ } | |
+} | |
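+ | 
+// The CFG emitted above is, roughly: | 
+//   CheckMap[i]:   map(val) == maps[i]   ? success : CheckMap[i+1] | 
+//   CheckMap[n-1]: map(val) == maps[n-1] ? success : deferred/deopt | 
+// With a migration target the final mismatch calls | 
+// Runtime::kTryMigrateInstance and deoptimizes only if that call returns | 
+// a Smi (i.e. migration failed). | 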
+ | |
+void LLVMChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) { | |
+ llvm::Value* val = Use(instr->value()); | |
+  // Load the map (FieldOperand alone yields the address, not the map). | 
+  llvm::Value* tagged_val = LoadFieldOperand(val, HeapObject::kMapOffset); | 
+  llvm::Value* cmp = __ CreateICmpNE(Use(instr->map()), tagged_val); | 
+ DeoptimizeIf(cmp, Deoptimizer::kWrongMap, true); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCheckSmi(HCheckSmi* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCheckValue(HCheckValue* instr) { | |
+ llvm::Value* reg = Use(instr->value()); | |
+ Handle<Object> source = instr->object().handle(); | |
+ llvm::Value* cmp = nullptr; | |
+ if (source->IsSmi()) { | |
+ Smi* smi = Smi::cast(*source); | |
+ intptr_t intptr_value = reinterpret_cast<intptr_t>(smi); | |
+ llvm::Value* value = __ getInt64(intptr_value); | |
+ cmp = __ CreateICmpNE(reg, value); | |
+ } else { | |
+ auto obj = MoveHeapObject(instr->object().handle()); | |
+ cmp = __ CreateICmpNE(reg, obj); | |
+ } | |
+ DeoptimizeIf(cmp, Deoptimizer::kValueMismatch); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoClampToUint8(HClampToUint8* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoClassOfTestAndBranch(HClassOfTestAndBranch* instr) { | |
+ UNIMPLEMENTED(); | |
+  // TODO: find a test that exercises this case; | 
+  // the loop below is probably not correct. | 
+ llvm::Value* input = Use(instr->value()); | |
+ llvm::Value* temp = nullptr; | |
+ llvm::Value* temp2 = nullptr; | |
+ Handle<String> class_name = instr->class_name(); | |
+ llvm::BasicBlock* input_not_smi = NewBlock("DoClassOfTestAndBranch" | |
+ "input NotSmi"); | |
+ llvm::BasicBlock* continue_ = NewBlock("DoClassOfTestAndBranch Continue"); | |
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::Value* smi_cond = SmiCheck(input); | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), input_not_smi); | |
+ __ SetInsertPoint(input_not_smi); | |
+ | |
+ if (String::Equals(isolate()->factory()->Function_string(), class_name)) { | |
+ STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | |
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
+ FIRST_SPEC_OBJECT_TYPE + 1); | |
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
+ LAST_SPEC_OBJECT_TYPE - 1); | |
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ temp = LoadFieldOperand(input, HeapObject::kMapOffset); | |
+ llvm::Value* instance_type = LoadFieldOperand(temp, | |
+ Map::kInstanceTypeOffset); | |
+ temp2 = __ CreateAnd(instance_type, __ getInt64(0x000000ff)); | |
+ llvm::Value* obj_type = __ getInt64(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); | |
+ llvm::Value* sub = __ CreateSub(temp2, obj_type); | |
+ llvm::Value* imm = __ getInt64(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - | |
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); | |
+ llvm::Value* cmp = __ CreateICmpUGE(sub, imm); | |
+ __ CreateCondBr(cmp, Use(instr->SuccessorAt(1)), continue_); | |
+ __ SetInsertPoint(continue_); | |
+ } | |
+ | |
+ llvm::BasicBlock* loop = NewBlock("DoClassOfTestAndBranch loop"); | |
+ llvm::BasicBlock* loop_not_smi = NewBlock("DoClassOfTestAndBranch " | |
+ "loop not_smi"); | |
+ llvm::BasicBlock* equal = NewBlock("DoClassOfTestAndBranch loop type equal"); | |
+ llvm::BasicBlock* done = NewBlock("DoClassOfTestAndBranch done"); | |
+ llvm::Value* map = LoadFieldOperand(temp, | |
+ Map::kConstructorOrBackPointerOffset); | |
+ llvm::Value* new_map = nullptr; | |
+ __ CreateBr(loop); | |
+ | |
+ __ SetInsertPoint(loop); | |
+ llvm::PHINode* phi_map = __ CreatePHI(Types::i64, 2); | |
+ phi_map->addIncoming(map, insert); | |
+ llvm::Value* map_is_smi = SmiCheck(phi_map); | |
+ __ CreateCondBr(map_is_smi, done, loop_not_smi); | |
+ | |
+ __ SetInsertPoint(loop_not_smi); | |
+ llvm::Value* scratch = LoadFieldOperand(phi_map, HeapObject::kMapOffset); | |
+ llvm::Value* other_map = LoadFieldOperand(scratch, Map::kInstanceTypeOffset); | |
+ llvm::Value* type_cmp = __ CreateICmpNE(other_map, __ getInt64(static_cast<int>(MAP_TYPE))); | |
+ __ CreateCondBr(type_cmp, done, equal); | |
+ | |
+ __ SetInsertPoint(equal); | |
+ new_map = LoadFieldOperand(phi_map, Map::kConstructorOrBackPointerOffset); | |
+ phi_map->addIncoming(new_map, equal); | |
+ __ CreateBr(loop); | |
+ | |
+  // TODO: fix "instruction does not dominate all uses" for other_map. | 
+ llvm::Value* zero = __ getInt64(0); | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi_instance = __ CreatePHI(Types::i64, 2); | |
+ phi_instance->addIncoming(zero, insert); | |
+ phi_instance->addIncoming(other_map, loop_not_smi); | |
+ | |
+ llvm::Value* func_type = __ getInt64(static_cast<int>(JS_FUNCTION_TYPE)); | |
+ llvm::Value* CmpInstance = __ CreateICmpNE(phi_instance, func_type); | |
+ llvm::BasicBlock* after_cmp_instance = NewBlock("DoClassOfTestAndBranch after " | |
+ "CmpInstance"); | |
+ if (String::Equals(class_name, isolate()->factory()->Object_string())) { | |
+ __ CreateCondBr(CmpInstance, Use(instr->SuccessorAt(0)), | |
+ after_cmp_instance); | |
+ __ SetInsertPoint(after_cmp_instance); | |
+ } else { | |
+ __ CreateCondBr(CmpInstance, Use(instr->SuccessorAt(1)), | |
+ after_cmp_instance); | |
+ __ SetInsertPoint(after_cmp_instance); | |
+ } | |
+ | |
+ llvm::Value* shared_info = LoadFieldOperand(phi_map, JSFunction::kSharedFunctionInfoOffset); | |
+ llvm::Value* instance_class_name = LoadFieldOperand(shared_info, | |
+ SharedFunctionInfo::kInstanceClassNameOffset); | |
+ DCHECK(class_name->IsInternalizedString()); | |
+ llvm::Value* result = nullptr; | |
+ AllowDeferredHandleDereference smi_check; | |
+ if (class_name->IsSmi()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ llvm::Value* name = MoveHeapObject(class_name); | |
+ result = __ CreateICmpEQ(instance_class_name, name); | |
+ } | |
+ __ CreateCondBr(result, Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCompareNumericAndBranch( | |
+ HCompareNumericAndBranch* instr) { | |
+ Representation r = instr->representation(); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ DCHECK(left->representation().Equals(r)); | |
+ DCHECK(right->representation().Equals(r)); | |
+ bool is_unsigned = r.IsDouble() | |
+ || left->CheckFlag(HInstruction::kUint32) | |
+ || right->CheckFlag(HInstruction::kUint32); | |
+ | |
+ bool is_double = instr->representation().IsDouble(); | |
+ llvm::CmpInst::Predicate pred = TokenToPredicate(instr->token(), | |
+ is_unsigned, | |
+ is_double); | |
+ if (r.IsSmi()) { | |
+ llvm::Value* compare = __ CreateICmp(pred, Use(left), Use(right)); | |
+ llvm::Value* branch = __ CreateCondBr(compare, | |
+ Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+ instr->set_llvm_value(branch); | |
+ } else if (r.IsInteger32()) { | |
+ llvm::Value* llvm_left = Use(left); | |
+ llvm::Value* llvm_right = Use(right); | |
+ llvm::Value* compare = __ CreateICmp(pred, llvm_left, llvm_right); | |
+ llvm::Value* branch = __ CreateCondBr(compare, | |
+ Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+ instr->set_llvm_value(branch); | |
+ } else { | |
+ DCHECK(r.IsDouble()); | |
+ llvm::Value* llvm_left = Use(left); | |
+ llvm::Value* llvm_right = Use(right); | |
+ llvm::Value* compare = __ CreateFCmp(pred, llvm_left, llvm_right); | |
+ llvm::Value* branch = __ CreateCondBr(compare, | |
+ Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+ instr->set_llvm_value(branch); | |
+    // FIXME: handle the NaN (parity even) case. | 
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCompareHoleAndBranch(HCompareHoleAndBranch* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { | |
+ Token::Value op = instr->token(); | |
+ Handle<Code> ic = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ ic = CodeFactory::CompareIC(isolate(), op, instr->strength()).code(); | |
+ } | |
+ llvm::CmpInst::Predicate pred = TokenToPredicate(op, false, false); | |
+ auto context = Use(instr->context()); | |
+ auto left = Use(instr->left()); | |
+ auto right = Use(instr->right()); | |
+ std::vector<llvm::Value*> params = { context, left, right }; | |
+ auto result = CallCode(ic, llvm::CallingConv::X86_64_V8_S10, params); | |
+ result = __ CreateBitOrPointerCast(result, Types::i64); | |
+  // The Lithium comparison is laid out differently; this should be equivalent. | 
+ auto compare_result = __ CreateICmp(pred, result, __ getInt64(0)); | |
+ auto compare_true = NewBlock("generic comparison true"); | |
+ auto compare_false = NewBlock("generic comparison false"); | |
+ llvm::Value* true_value = nullptr; | |
+ llvm::Value* false_value = nullptr; | |
+ auto merge = NewBlock("generic comparison merge"); | |
+ __ CreateCondBr(compare_result, compare_true, compare_false); | |
+ | |
+ __ SetInsertPoint(compare_true); | |
+ true_value = LoadRoot(Heap::kTrueValueRootIndex); | |
+ __ CreateBr(merge); | |
+ | |
+ __ SetInsertPoint(compare_false); | |
+ false_value = LoadRoot(Heap::kFalseValueRootIndex); | |
+ __ CreateBr(merge); | |
+ | |
+ __ SetInsertPoint(merge); | |
+ auto phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(true_value, compare_true); | |
+ phi->addIncoming(false_value, compare_false); | |
+ instr->set_llvm_value(phi); | |
+  // TODO: the calling convention should be v8_ic. | 
+} | |
+ | |
+void LLVMChunkBuilder::DoCompareMinusZeroAndBranch(HCompareMinusZeroAndBranch* instr) { | |
+ Representation rep = instr->value()->representation(); | |
+ | |
+ if (rep.IsDouble()) { | |
+ llvm::Value* zero = llvm::ConstantFP::get(Types::float64, 0); | |
+ llvm::Value* not_zero = __ CreateFCmpONE(Use(instr->value()), zero); | |
+ llvm::BasicBlock* is_zero = NewBlock("Instruction value is zero"); | |
+ __ CreateCondBr(not_zero, Use(instr->SuccessorAt(1)), is_zero); | |
+ __ SetInsertPoint(is_zero); | |
+    // The value is known to equal 0.0 here; an FCmp cannot tell -0.0 from | 
+    // +0.0 (they compare equal), so inspect the sign bit directly. | 
+    llvm::Value* bits = __ CreateBitCast(Use(instr->value()), Types::i64); | 
+    llvm::Value* is_minus_zero = __ CreateICmpEQ( | 
+        bits, __ getInt64(static_cast<uint64_t>(1) << 63)); | 
+    llvm::BranchInst* branch = __ CreateCondBr(is_minus_zero, | 
+        Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | 
+ instr->set_llvm_value(branch); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) { | |
+  // TODO: test this case (charCodeAt function). | 
+ llvm::Value* cmp = nullptr; | |
+ if (instr->right()->IsConstant()) { | |
+ HConstant* constant = HConstant::cast(instr->right()); | |
+ Handle<Object> handle_value = constant->handle(isolate()); | |
+ llvm::Value* obj = MoveHeapObject(handle_value); | |
+ cmp = __ CreateICmpEQ(Use(instr->left()), obj); | |
+ } else { | |
+ cmp = __ CreateICmpEQ(Use(instr->left()), Use(instr->right())); | |
+ } | |
+ __ CreateCondBr(cmp, Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoCompareMap(HCompareMap* instr) { | |
+ auto compare = CompareMap(Use(instr->value()), instr->map().handle()); | |
+ llvm::BranchInst* branch = __ CreateCondBr(compare, | |
+ Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+ instr->set_llvm_value(branch); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoConstructDouble(HConstructDouble* instr) { | |
+  // TODO: not tested. | 
+  llvm::Value* hi = Use(instr->hi()); | 
+  llvm::Value* lo = Use(instr->lo()); | 
+  llvm::Value* hi_ext = __ CreateZExt(hi, Types::i64); | 
+  llvm::Value* lo_ext = __ CreateZExt(lo, Types::i64); | 
+  llvm::Value* hi_shift = __ CreateShl(hi_ext, __ getInt64(32)); | 
+  llvm::Value* result = __ CreateOr(hi_shift, lo_ext); | 
+  // Reinterpret the combined bits as a double; a numeric conversion | 
+  // (SIToFP) would compute a different value. | 
+  llvm::Value* result_double = __ CreateBitCast(result, Types::float64); | 
+ instr->set_llvm_value(result_double); | |
+} | |
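+ | 
+// E.g. hi = 0x40090000 and lo = 0 combine into the bit pattern | 
+// 0x4009000000000000, i.e. the double 3.125. | 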
+ | |
+int64_t LLVMChunkBuilder::RootRegisterDelta(ExternalReference other) { | |
+ if (//predictable_code_size() && | |
+ (other.address() < reinterpret_cast<Address>(isolate()) || | |
+ other.address() >= reinterpret_cast<Address>(isolate() + 1))) { | |
+ return -1; | |
+ } | |
+ Address roots_register_value = kRootRegisterBias + | |
+ reinterpret_cast<Address>(isolate()->heap()->roots_array_start()); | |
+ | |
+ int64_t delta = -1; | |
+ if (kPointerSize == kInt64Size) { | |
+ delta = other.address() - roots_register_value; | |
+ } else { | |
+ uint64_t o = static_cast<uint32_t>( | |
+ reinterpret_cast<intptr_t>(other.address())); | |
+ uint64_t r = static_cast<uint32_t>( | |
+ reinterpret_cast<intptr_t>(roots_register_value)); | |
+    delta = o - r; | 
+ } | |
+ return delta; | |
+} | |
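+ | 
+// Only addresses inside the Isolate have a fixed offset from the roots | 
+// array (which also lives in the Isolate); anything else yields -1. | 
+// On a 64-bit target the delta is simply | 
+//   other.address() - (roots_array_start + kRootRegisterBias), | 
+// so the reference can later be addressed root-relative. | 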
+ | |
+llvm::Value* LLVMChunkBuilder::ExternalOperand(ExternalReference target) { | |
+ int64_t delta = RootRegisterDelta(target); | |
+ Address root_array_start_address = | |
+ ExternalReference::roots_array_start(isolate()).address(); | |
+ auto int64_address = | |
+ __ getInt64(reinterpret_cast<uint64_t>(root_array_start_address)); | |
+ auto load_address = ConstructAddress(int64_address, delta); | |
+ auto casted_address = __ CreateBitCast(load_address, Types::ptr_i64); | |
+ llvm::Value* object = __ CreateLoad(casted_address); | |
+ return object; | |
+} | |
+ | |
+void LLVMChunkBuilder::PrepareCallCFunction(int num_arguments) { | |
+ int frame_alignment = base::OS::ActivationFrameAlignment(); | |
+ DCHECK(frame_alignment != 0); | |
+ DCHECK(num_arguments >= 0); | |
+ int argument_slots_on_stack = | |
+ ArgumentStackSlotsForCFunctionCall(num_arguments); | |
+ // Reading from rsp | |
+ LLVMContext& llvm_context = LLVMGranularity::getInstance().context(); | |
+ llvm::Function* read_from_rsp = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::read_register, { Types::i64 }); | |
+ auto metadata = | |
+ llvm::MDNode::get(llvm_context, llvm::MDString::get(llvm_context, "rsp")); | |
+ llvm::MetadataAsValue* val = llvm::MetadataAsValue::get( | |
+ llvm_context, metadata); | |
+ auto rsp_value = __ CreateCall(read_from_rsp, val); | |
+  // TODO: try to move the rsp value. | 
+ auto sub_v = __ CreateNSWSub(rsp_value, __ getInt64((argument_slots_on_stack + 1) * kRegisterSize)); | |
+ auto and_v = __ CreateAnd(sub_v, __ getInt64(-frame_alignment)); | |
+ auto address = ConstructAddress(and_v, argument_slots_on_stack * kRegisterSize); | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_i64); | |
+ __ CreateStore(rsp_value, casted_address); | |
+} | |
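+ | 
+// Worked example of the stack math above, with frame_alignment = 16 and | 
+// two stack argument slots: new_rsp = (rsp - 3 * 8) & -16, and the old | 
+// rsp is spilled to [new_rsp + 2 * 8] so CallCFunction can restore it. | 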
+ | |
+int LLVMChunkBuilder::ArgumentStackSlotsForCFunctionCall(int num_arguments) { | |
+ DCHECK(num_arguments >= 0); | |
+ const int kRegisterPassedArguments = 6; | |
+ if (num_arguments < kRegisterPassedArguments) return 0; | |
+ return num_arguments - kRegisterPassedArguments; | |
+} | |
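+ | 
+// System V AMD64 passes the first six integer arguments in registers | 
+// (rdi, rsi, rdx, rcx, r8, r9), so e.g. num_arguments = 8 needs | 
+// 8 - 6 = 2 stack slots. | 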
+ | |
+llvm::Value* LLVMChunkBuilder::LoadAddress(ExternalReference source) { | |
+ const int64_t kInvalidRootRegisterDelta = -1; | |
+ llvm::Value* object = nullptr; | |
+ //if (root_array_available_ && !serializer_enabled()) { | |
+ int64_t delta = RootRegisterDelta(source); | |
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { | |
+ Address root_array_start_address = | |
+ ExternalReference::roots_array_start(isolate()).address(); | |
+ auto int64_address = | |
+ __ getInt64(reinterpret_cast<uint64_t>(root_array_start_address)); | |
+ object = LoadFieldOperand(int64_address, static_cast<int32_t>(delta)); | |
+ return object; | |
+  } else { | 
+    // Fall back to loading through the reference's absolute address. | 
+    llvm::Value* address = __ getInt64( | 
+        reinterpret_cast<uint64_t>(source.address())); | 
+    auto constructed_address = ConstructAddress(address, 0); | 
+    object = __ CreateLoad(constructed_address); | 
+    return object; | 
+  } | 
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::CallCFunction(ExternalReference function, | |
+ std::vector<llvm::Value*> params, | |
+ int num_arguments) { | |
+ if (emit_debug_code()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ llvm::Value* obj = LoadAddress(function); | |
+ llvm::Value* call = CallVal(obj, llvm::CallingConv::X86_64_V8_S3, | |
+ params, Types::tagged); | |
+ DCHECK(base::OS::ActivationFrameAlignment() != 0); | |
+ DCHECK(num_arguments >= 0); | |
+ int argument_slots_on_stack = ArgumentStackSlotsForCFunctionCall(num_arguments); | |
+ LLVMContext& llvm_context = LLVMGranularity::getInstance().context(); | |
+ llvm::Function* intrinsic_read = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::read_register, { Types::i64 }); | |
+ auto metadata = | |
+ llvm::MDNode::get(llvm_context, llvm::MDString::get(llvm_context, "rsp")); | |
+ llvm::MetadataAsValue* val = llvm::MetadataAsValue::get( | |
+ llvm_context, metadata); | |
+ llvm::Value* rsp_value = __ CreateCall(intrinsic_read, val); | |
+ llvm::Value* address = ConstructAddress(rsp_value, argument_slots_on_stack * kRegisterSize); | |
+ llvm::Value* casted_address = __ CreateBitCast(address, Types::ptr_i64); | |
+ llvm::Value* object = __ CreateLoad(casted_address); | |
+  // Write the saved value back into rsp. | 
+ std::vector<llvm::Value*> parameter = {val, object}; | |
+ llvm::Function* intrinsic_write = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::write_register, { Types::i64 }); | |
+ __ CreateCall(intrinsic_write, parameter); | |
+ return call; | |
+} | |
+ | |
+void LLVMChunkBuilder::DoDateField(HDateField* instr) { | |
+ llvm::BasicBlock* date_field_equal = nullptr; | |
+ llvm::BasicBlock* date_field_runtime = NewBlock("Runtime"); | |
+ llvm::BasicBlock* DateFieldResult = NewBlock("Result block of DateField"); | |
+ llvm::Value* date_field_result_equal = nullptr; | |
+ Smi* index = instr->index(); | |
+ | |
+ if (FLAG_debug_code) { | |
+ AssertNotSmi(Use(instr->value())); | |
+ llvm::Value* map = FieldOperand(Use(instr->value()), | |
+ HeapObject::kMapOffset); | |
+ llvm::Value* cast_int = __ CreateBitCast(map, Types::ptr_i64); | |
+ llvm::Value* address = __ CreateLoad(cast_int); | |
+ llvm::Value* DateObject = LoadFieldOperand(address, Map::kMapOffset); | |
+ llvm::Value* object_type_check = __ CreateICmpEQ(DateObject, | |
+ __ getInt64(static_cast<int8_t>(JS_DATE_TYPE))); | |
+ Assert(object_type_check); | |
+ } | |
+ | |
+ if (index->value() == 0) { | |
+ llvm::Value* map = LoadFieldOperand(Use(instr->value()), JSDate::kValueOffset); | |
+ instr->set_llvm_value(map); | |
+ } else { | |
+ if (index->value() < JSDate::kFirstUncachedField) { | |
+ date_field_equal = NewBlock("equal"); | |
+ ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | |
+ llvm::Value* stamp_object = ExternalOperand(stamp); | |
+ llvm::Value* object = LoadFieldOperand(Use(instr->value()), JSDate::kCacheStampOffset); | |
+ llvm::Value* not_equal = __ CreateICmp(llvm::CmpInst::ICMP_NE, stamp_object, object); | |
+ __ CreateCondBr(not_equal, date_field_runtime, date_field_equal); | |
+ __ SetInsertPoint(date_field_equal); | |
+ date_field_result_equal = LoadFieldOperand(Use(instr->value()), JSDate::kValueOffset + | |
+ kPointerSize * index->value()); | |
+ __ CreateBr(DateFieldResult); | |
+ } | |
+ __ SetInsertPoint(date_field_runtime); | |
+ PrepareCallCFunction(2); | |
+ llvm::Value* param_one = Use(instr->value()); | |
+ intptr_t intptr_value = reinterpret_cast<intptr_t>(index); | |
+ llvm::Value* param_two = __ getInt64(intptr_value); | |
+ std::vector<llvm::Value*> params = { param_one, param_two }; | |
+ llvm::Value* result = CallCFunction(ExternalReference::get_date_field_function(isolate()), params, 2); | |
+ llvm::Value* date_field_result_runtime = __ CreatePtrToInt(result, Types::i64); | |
+ __ CreateBr(DateFieldResult); | |
+ __ SetInsertPoint(DateFieldResult); | |
+ if (date_field_equal) { | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i64, 2); | |
+ phi->addIncoming(date_field_result_equal, date_field_equal); | |
+ phi->addIncoming(date_field_result_runtime, date_field_runtime); | |
+ instr->set_llvm_value(phi); | |
+ } else { | |
+ instr->set_llvm_value(date_field_result_runtime); | |
+ } | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoDebugBreak(HDebugBreak* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoDeoptimize(HDeoptimize* instr) { | |
+ Deoptimizer::BailoutType type = instr->type(); | |
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | |
+ // needed return address), even though the implementation of LAZY and EAGER is | |
+ // now identical. When LAZY is eventually completely folded into EAGER, remove | |
+ // the special case below. | |
+ if (info()->IsStub() && type == Deoptimizer::EAGER) { | |
+ type = Deoptimizer::LAZY; | |
+ UNIMPLEMENTED(); | |
+ } | |
+ // we don't support lazy yet, since we have no test cases | |
+ // DCHECK(type == Deoptimizer::EAGER); | |
+ auto reason = instr->reason(); | |
+ USE(reason); | |
+ bool negate_condition = false; | |
+  // It's unreachable, but we don't care. We need it so that DeoptimizeIf() | 
+ // does not create a new basic block which ends up unterminated. | |
+ auto next_block = Use(instr->SuccessorAt(0)); | |
+ DeoptimizeIf(__ getTrue(), instr->reason(), negate_condition, next_block); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoDiv(HDiv* instr) { | |
+ if(instr->representation().IsInteger32() || instr->representation().IsSmi()) { | |
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ HValue* dividend = instr->left(); | |
+ HValue* divisor = instr->right(); | |
+ llvm::Value* Div = __ CreateSDiv(Use(dividend), Use(divisor),""); | |
+ instr->set_llvm_value(Div); | |
+ } else if (instr->representation().IsDouble()) { | |
+ DCHECK(instr->representation().IsDouble()); | |
+ DCHECK(instr->left()->representation().IsDouble()); | |
+ DCHECK(instr->right()->representation().IsDouble()); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* fDiv = __ CreateFDiv(Use(left), Use(right), ""); | |
+ instr->set_llvm_value(fDiv); | |
+  } else { | 
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
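+ | 
+// Note: CreateSDiv has undefined behavior on division by zero and on | 
+// kMinInt / -1; the Lithium version guards these cases with deopts | 
+// (kDivisionByZero, kMinusZero, kOverflow), none of which are emitted | 
+// here yet. | 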
+ | |
+void LLVMChunkBuilder::DoDoubleBits(HDoubleBits* instr) { | |
+ llvm::Value* value = Use(instr->value()); | |
+ if (instr->bits() == HDoubleBits::HIGH) { | |
+ llvm::Value* tmp = __ CreateBitCast(value, Types::i64); | |
+ value = __ CreateLShr(tmp, __ getInt64(32)); | |
+ value = __ CreateTrunc(value, Types::i32); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ instr->set_llvm_value(value); | |
+} | |
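+ | 
+// E.g. 1.0 has the bit pattern 0x3FF0000000000000, so the HIGH half | 
+// extracted above is 0x3FF00000. | 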
+ | |
+void LLVMChunkBuilder::DoDummyUse(HDummyUse* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoEnterInlined(HEnterInlined* instr) { | |
+ HEnvironment* outer = current_block_->last_environment(); | |
+ outer->set_ast_id(instr->ReturnId()); | |
+ HConstant* undefined = graph()->GetConstantUndefined(); | |
+ HEnvironment* inner = outer->CopyForInlining(instr->closure(), | |
+ instr->arguments_count(), | |
+ instr->function(), | |
+ undefined, | |
+ instr->inlining_kind()); | |
+ // Only replay binding of arguments object if it wasn't removed from graph. | |
+ if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { | |
+ inner->Bind(instr->arguments_var(), instr->arguments_object()); | |
+ } | |
+ inner->BindContext(instr->closure_context()); | |
+ inner->set_entry(instr); | |
+ current_block_->UpdateEnvironment(inner); | |
+ chunk()->AddInlinedFunction(instr->shared()); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoForceRepresentation(HForceRepresentation* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) { | |
+ llvm::Value* map_val = Use(instr->map()); | |
+ llvm::BasicBlock* load_cache = NewBlock("LOAD CACHE"); | |
+ llvm::BasicBlock* done_block1 = NewBlock("DONE1"); | |
+ llvm::BasicBlock* done_block = NewBlock("DONE"); | |
+ | |
+ llvm::Value* result = EnumLength(map_val); | |
+ llvm::Value* cmp_neq = __ CreateICmpNE(result, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_neq, load_cache, done_block1); | |
+ __ SetInsertPoint(done_block1); | |
+ llvm::Value* result1 = LoadRoot(Heap::kEmptyFixedArrayRootIndex); | |
+ __ CreateBr(done_block); | |
+ __ SetInsertPoint(load_cache); | |
+ result = LoadFieldOperand(map_val, Map::kDescriptorsOffset); | |
+ result = LoadFieldOperand(result, DescriptorArray::kEnumCacheOffset); | |
+ result = LoadFieldOperand(result, FixedArray::SizeFor(HForInCacheArray::cast(instr)->idx())); | |
+ __ CreateBr(done_block); | |
+ __ SetInsertPoint(done_block); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(result1, done_block1); | |
+ phi->addIncoming(result, load_cache); | |
+ llvm::Value* cond = SmiCheck(phi, true); | |
+ DeoptimizeIf(cond, Deoptimizer::kNoCache, true); | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::EnumLength(llvm::Value* map) { | |
+ STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | |
+ llvm::Value* length = LoadFieldOperand(map, Map::kBitField3Offset); | |
+ llvm::Value* tagged = __ CreatePtrToInt(length, Types::i64); | |
+ llvm::Value* length32 = __ CreateIntCast(tagged, Types::i32, true); | |
+ llvm::Value* imm = __ getInt32(Map::EnumLengthBits::kMask); | |
+ llvm::Value* result = __ CreateAnd(length32, imm); | |
+ return Integer32ToSmi(result); | |
+} | |
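+ | 
+// Map::kBitField3Offset holds a 32-bit field whose low bits (shift 0, per | 
+// the STATIC_ASSERT) hold the enum cache length; masking with | 
+// EnumLengthBits::kMask extracts it, and the result is re-tagged as a Smi. | 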
+ | |
+void LLVMChunkBuilder::CheckEnumCache(HValue* enum_val, llvm::Value* val, | 
+                                      llvm::BasicBlock* call_runtime) { | 
+  llvm::BasicBlock* next = NewBlock("CheckEnumCache next"); | 
+  llvm::BasicBlock* start = NewBlock("CheckEnumCache start"); | 
+  llvm::Value* enumerable = Use(enum_val); | 
+  llvm::Value* empty_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex); | 
+  // Check if the enum length field is properly initialized, indicating that | 
+  // there is an enum cache. | 
+  llvm::Value* map = LoadFieldOperand(enumerable, HeapObject::kMapOffset); | 
+  llvm::Value* length = EnumLength(map); | 
+  Smi* invalid_enum = Smi::FromInt(kInvalidEnumCacheSentinel); | 
+  llvm::Value* enum_length = ValueFromSmi(invalid_enum); | 
+  llvm::Value* cmp = __ CreateICmpEQ(length, enum_length); | 
+  llvm::BasicBlock* entry = __ GetInsertBlock(); | 
+  __ CreateCondBr(cmp, call_runtime, start); | 
+ | 
+  // The current object and its map travel around the loop as phis so that | 
+  // every use is dominated by its definition. | 
+  __ SetInsertPoint(start); | 
+  llvm::PHINode* object_phi = __ CreatePHI(Types::tagged, 2); | 
+  object_phi->addIncoming(enumerable, entry); | 
+  llvm::PHINode* map_phi = __ CreatePHI(Types::tagged, 2); | 
+  map_phi->addIncoming(map, entry); | 
+ | 
+  // Check that there are no elements; object_phi holds the current JS | 
+  // object we've reached through the prototype chain. | 
+  llvm::BasicBlock* no_elements = NewBlock("CheckEnumCache no_elements"); | 
+  llvm::BasicBlock* second_chance = NewBlock("CheckEnumCache second chance"); | 
+  llvm::Value* elements = LoadFieldOperand(object_phi, | 
+                                           JSObject::kElementsOffset); | 
+  llvm::Value* cmp_obj = __ CreateICmpEQ(empty_array, elements); | 
+  __ CreateCondBr(cmp_obj, no_elements, second_chance); | 
+ | 
+  __ SetInsertPoint(second_chance); | 
+  llvm::Value* slow = LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex); | 
+  llvm::Value* second_cmp_obj = __ CreateICmpNE(slow, elements); | 
+  __ CreateCondBr(second_cmp_obj, call_runtime, no_elements); | 
+ | 
+  __ SetInsertPoint(no_elements); | 
+  llvm::Value* prototype = LoadFieldOperand(map_phi, Map::kPrototypeOffset); | 
+  llvm::BasicBlock* final_block = NewBlock("CheckEnumCache end"); | 
+  llvm::Value* final_cmp = __ CreateICmpNE(prototype, val); | 
+  __ CreateCondBr(final_cmp, next, final_block); | 
+ | 
+  // For all objects but the receiver, check that the cache is empty. | 
+  __ SetInsertPoint(next); | 
+  llvm::Value* next_map = LoadFieldOperand(prototype, HeapObject::kMapOffset); | 
+  llvm::Value* next_length = EnumLength(next_map); | 
+  llvm::Value* test = __ CreateAnd(next_length, next_length); | 
+  llvm::Value* zero = __ getInt64(0); | 
+  llvm::Value* cmp_zero = __ CreateICmpNE(test, zero); | 
+  __ CreateCondBr(cmp_zero, call_runtime, start); | 
+  object_phi->addIncoming(prototype, next); | 
+  map_phi->addIncoming(next_map, next); | 
+ | 
+  __ SetInsertPoint(final_block); | 
+} | 
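+ | 
+// The walk above mirrors MacroAssembler::CheckEnumCache: bail out to the | 
+// runtime if any object on the prototype chain has an uninitialized or | 
+// non-empty enum cache, or elements other than the empty fixed array or | 
+// the empty slow element dictionary; the walk ends when the prototype | 
+// equals `val` (normally the null value). | 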
+ | |
+ | |
+void LLVMChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { | |
+ llvm::Value* enumerable = Use(instr->enumerable()); | |
+ llvm::Value* smi_check = SmiCheck(enumerable); | |
+ DeoptimizeIf(smi_check, Deoptimizer::kSmi); | |
+ | |
+ STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | |
+ llvm::Value* cmp_type = CmpObjectType(enumerable, LAST_JS_PROXY_TYPE, | |
+ llvm::CmpInst::ICMP_ULE); | |
+ DeoptimizeIf(cmp_type, Deoptimizer::kWrongInstanceType); | |
+ | |
+ llvm::Value* root = LoadRoot(Heap::kNullValueRootIndex); | |
+ llvm::BasicBlock* call_runtime = NewBlock("DoForInPrepareMap call runtime"); | |
+ llvm::BasicBlock* merge = NewBlock("DoForInPrepareMap use cache"); | |
+ CheckEnumCache(instr->enumerable(), root, call_runtime); | |
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::Value* map = LoadFieldOperand(enumerable, HeapObject::kMapOffset); | |
+ __ CreateBr(merge); | |
+ | |
+ // Get the set of properties to enumerate. | |
+ __ SetInsertPoint(call_runtime); | |
+ std::vector<llvm::Value*> args; | |
+ args.push_back(enumerable); | |
+ llvm::Value* set = CallRuntimeFromDeferred(Runtime::kGetPropertyNamesFast, | |
+ Use(instr->context()), args); | |
+ llvm::Value* runtime_map = LoadFieldOperand(set, HeapObject::kMapOffset); | |
+ llvm::Value* cmp_root = CompareRoot(runtime_map, Heap::kMetaMapRootIndex, | |
+ llvm::CmpInst::ICMP_NE); | |
+ DeoptimizeIf(cmp_root, Deoptimizer::kWrongMap); | |
+ __ CreateBr(merge); | |
+ | |
+ __ SetInsertPoint(merge); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(map, insert); | |
+ phi->addIncoming(set, call_runtime); | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoGetCachedArrayIndex(HGetCachedArrayIndex* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoHasCachedArrayIndexAndBranch(HHasCachedArrayIndexAndBranch* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | |
+ InstanceType from = instr->from(); | |
+ InstanceType to = instr->to(); | |
+ if (from == FIRST_TYPE) return to; | |
+ DCHECK(from == to || to == LAST_TYPE); | |
+ return from; | |
+} | |
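+ | 
+// TestType collapses the [from, to] instance type interval into the single | 
+// boundary type a comparison needs: from == to is an exact check, while | 
+// from == FIRST_TYPE (or to == LAST_TYPE) means "everything up to `to`" | 
+// (or "everything from `from` on"). Lithium pairs this with a matching | 
+// condition (equal / below_equal / above_equal); the equality-style | 
+// CmpObjectType below appears to cover only the exact-match case. | 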
+ | |
+void LLVMChunkBuilder::DoHasInstanceTypeAndBranch(HHasInstanceTypeAndBranch* instr) { | |
+ llvm::Value* input = Use(instr->value()); | |
+ llvm::BasicBlock* near = NewBlock("HasInstanceTypeAndBranch Near"); | |
+ llvm::BranchInst* branch = nullptr; | |
+ | |
+ if (!instr->value()->type().IsHeapObject()) { | |
+ llvm::Value* smi_cond = SmiCheck(input); | |
+ branch = __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), near); | |
+ __ SetInsertPoint(near); | |
+ } | |
+ | 
+ auto cmp = CmpObjectType(input, TestType(instr)); | |
+ branch = __ CreateCondBr(cmp, Use(instr->SuccessorAt(0)), | |
+ Use(instr->SuccessorAt(1))); | |
+ instr->set_llvm_value(branch); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoInnerAllocatedObject(HInnerAllocatedObject* instr) { | |
+ if(instr->offset()->IsConstant()) { | |
+ uint32_t offset = (HConstant::cast(instr->offset()))->Integer32Value(); | |
+ llvm::Value* gep = ConstructAddress(Use(instr->base_object()), offset); | |
+ instr->set_llvm_value(gep); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoInstanceOf(HInstanceOf* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoHasInPrototypeChainAndBranch( | |
+ HHasInPrototypeChainAndBranch* instr) { | |
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::Value* object = Use(instr->object()); | |
+ llvm::Value* prototype = Use(instr->prototype()); | |
+ if (instr->ObjectNeedsSmiCheck()) { | |
+ llvm::BasicBlock* check_smi = NewBlock("DoHasInPrototypeChainAndBranch" | |
+ " after check smi"); | |
+ llvm::Value* is_smi = SmiCheck(object, false); | |
+ __ CreateCondBr(is_smi, Use(instr->SuccessorAt(1)), check_smi); | |
+ | |
+ __ SetInsertPoint(check_smi); | |
+ } | |
+ | |
+ llvm::BasicBlock* loop = NewBlock("DoHasInPrototypeChainAndBranch loop"); | |
+ llvm::Value* object_map = LoadFieldOperand(object, HeapObject::kMapOffset); | |
+ llvm::BasicBlock* after_compare_root = NewBlock("DoHasInPrototypeChainAndBranch" | |
+ " after compare root"); | |
+ llvm::Value* load_object_map; | |
+ __ CreateBr(loop); | |
+ __ SetInsertPoint(loop); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(object_map, insert); | |
+ llvm::Value* object_prototype = LoadFieldOperand(phi, | |
+ Map::kPrototypeOffset); | |
+ llvm::Value* cmp = __ CreateICmpEQ(object_prototype, prototype); | |
+ llvm::BasicBlock* compare_root = NewBlock("DoHasInPrototypeChainAndBranch" | |
+ " compare root"); | |
+ __ CreateCondBr(cmp, Use(instr->SuccessorAt(0)), compare_root); | |
+ | |
+ __ SetInsertPoint(compare_root); | |
+ llvm::Value* cmp_root = CompareRoot(object_prototype, | |
+ Heap::kNullValueRootIndex); | |
+ __ CreateCondBr(cmp_root, Use(instr->SuccessorAt(1)), after_compare_root); | |
+ | |
+ __ SetInsertPoint(after_compare_root); | |
+ load_object_map = LoadFieldOperand(object_prototype, HeapObject::kMapOffset); | |
+ phi->addIncoming(load_object_map, after_compare_root); | |
+ | |
+ __ CreateBr(loop); | |
+} | |
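+ | 
+// Loop sketch: the phi holds the current map; each iteration compares | 
+// map->prototype with the target (hit: true successor), checks for the | 
+// null sentinel (miss: false successor), and otherwise loads the | 
+// prototype's map and goes around again. | 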
+ | |
+void LLVMChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { | |
+ //TODO: Not tested | |
+ Handle<JSFunction> known_function = instr->known_function(); | |
+ if (known_function.is_null()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ bool dont_adapt_arguments = | |
+ instr->formal_parameter_count() == SharedFunctionInfo::kDontAdaptArgumentsSentinel; | |
+ bool can_invoke_directly = | |
+ dont_adapt_arguments || instr->formal_parameter_count() == (instr->argument_count()-1); | |
+ if (can_invoke_directly) { | |
+ llvm::Value* context = LoadFieldOperand(Use(instr->function()), JSFunction::kContextOffset); | |
+ | |
+ if (dont_adapt_arguments) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+      // Invoke the function directly. | 
+ if (instr->known_function().is_identical_to(info()->closure())) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(Use(instr->function())); | |
+        for (int i = pending_pushed_args_.length() - 1; i >= 0; --i) | 
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ // callingConv | |
+ llvm::Value* call = CallVal(LoadFieldOperand(Use(instr->function()), JSFunction::kCodeEntryOffset), | |
+ llvm::CallingConv::X86_64_V8_S4, params, Types::tagged); | |
+ llvm::Value* return_val = __ CreatePtrToInt(call, Types::i64); | |
+ instr->set_llvm_value(return_val); | |
+ } | |
+ //TODO: Implement SafePoint with lazy deopt | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoIsConstructCallAndBranch( | |
+ HIsConstructCallAndBranch* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) { | |
+ llvm::Value* input = Use(instr->value()); | |
+ llvm::Value* is_smi = SmiCheck(input); | |
+ llvm::BranchInst* branch = __ CreateCondBr(is_smi, | |
+ Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+ instr->set_llvm_value(branch); | |
+} | |
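+ | 
+// A Smi has tag bit 0 clear (kSmiTag == 0, kSmiTagSize == 1), so SmiCheck | 
+// reduces to testing the low bit of the word. | 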
+ | |
+void LLVMChunkBuilder::DoIsUndetectableAndBranch(HIsUndetectableAndBranch* instr) { | |
+ if (!instr->value()->type().IsHeapObject()) { | |
+ llvm::BasicBlock* after_check_smi = NewBlock("IsUndetectableAndBranch after check smi"); | |
+ llvm::Value* smi_cond = SmiCheck(Use(instr->value()), false); | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), after_check_smi); | |
+ __ SetInsertPoint(after_check_smi); | |
+ } | |
+  llvm::Value* map = LoadFieldOperand(Use(instr->value()), | 
+                                      HeapObject::kMapOffset); | 
+  llvm::Value* bit_field = LoadFieldOperand(map, Map::kBitFieldOffset); | 
+  llvm::Value* bits = __ CreatePtrToInt(bit_field, Types::i64); | 
+  // Test the undetectable bit instead of comparing the whole bit field, | 
+  // which may have other bits set as well. | 
+  llvm::Value* test = __ CreateAnd(bits, | 
+                                   __ getInt64(1 << Map::kIsUndetectable)); | 
+  llvm::Value* cmp = __ CreateICmpNE(test, __ getInt64(0)); | 
+  llvm::Value* result = __ CreateCondBr(cmp, Use(instr->SuccessorAt(0)), | 
+                                        Use(instr->SuccessorAt(1))); | 
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { | |
+ HEnvironment* env = current_block_->last_environment(); | |
+ | |
+ if (env->entry()->arguments_pushed()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ HEnvironment* outer = current_block_->last_environment()-> | |
+ DiscardInlined(false); | |
+ current_block_->UpdateEnvironment(outer); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { | |
+ llvm::Value* context = Use(instr->value()); | |
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ auto offset = Context::SlotOffset(instr->slot_index()); | |
+ llvm::Value* result_addr = ConstructAddress(context, offset); | |
+ llvm::Value* result_casted = __ CreateBitCast(result_addr, Types::ptr_tagged); | |
+ llvm::Value* result = __ CreateLoad(result_casted); | |
+ llvm::Value* root = nullptr; | |
+ llvm::BasicBlock* load_root = nullptr; | |
+ | |
+ int count = 1; | |
+ if (instr->RequiresHoleCheck()) { | |
+ llvm::Value* cmp_root = CompareRoot(result, Heap::kTheHoleValueRootIndex); | |
+ if (instr->DeoptimizesOnHole()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ load_root = NewBlock("DoLoadContextSlot load root"); | |
+      llvm::BasicBlock* is_not_hole = NewBlock("DoLoadContextSlot " | 
+                                               "is not hole"); | 
+ count = 2; | |
+ __ CreateCondBr(cmp_root, load_root, is_not_hole); | |
+ __ SetInsertPoint(load_root); | |
+ root = LoadRoot(Heap::kUndefinedValueRootIndex); | |
+ __ CreateBr(is_not_hole); | |
+ | |
+ __ SetInsertPoint(is_not_hole); | |
+ } | |
+ } | |
+ | |
+ if (count == 1) { | |
+ instr->set_llvm_value(result); | |
+ } else { | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(result, insert); | |
+ phi->addIncoming(root, load_root); | |
+ instr->set_llvm_value(phi); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) { | |
+ llvm::Value* object = Use(instr->object()); | |
+ llvm::Value* index = nullptr; | |
+ if (instr->index()->representation().IsTagged()) { | |
+ llvm::Value* temp = Use(instr->index()); | |
+ index = __ CreatePtrToInt(temp, Types::i64); | |
+ } else { | |
+ index = Use(instr->index()); | |
+ } | |
+ | |
+  // The DeferredLoadMutableDouble case is not implemented. | 
+ llvm::BasicBlock* out_of_obj = NewBlock("OUT OF OBJECT"); | |
+ llvm::BasicBlock* done1 = NewBlock("DONE1"); | |
+ llvm::BasicBlock* done = NewBlock("DONE"); | |
+ llvm::Value* smi_tmp = __ CreateAShr(index, __ getInt64(1)); | |
+ index = __ CreateLShr(smi_tmp, kSmiShift); | |
+ index = __ CreateTrunc(index, Types::i32); | |
+ llvm::Value* cmp_less = __ CreateICmpSLT(index, __ getInt32(0)); | |
+ __ CreateCondBr(cmp_less, out_of_obj, done1); | |
+ __ SetInsertPoint(done1); | |
+ //FIXME: Use BuildFastArrayOperand | |
+ llvm::Value* scale = __ getInt32(8); | |
+ llvm::Value* offset = __ getInt32(JSObject::kHeaderSize); | |
+ llvm::Value* mul = __ CreateMul(index, scale); | |
+ llvm::Value* add = __ CreateAdd(mul, offset); | |
+ llvm::Value* int_ptr = __ CreateIntToPtr(object, Types::ptr_i8); | |
+ llvm::Value* gep_0 = __ CreateGEP(int_ptr, add); | |
+ llvm::Value* tmp1 = __ CreateBitCast(gep_0, Types::ptr_i64); | |
+ llvm::Value* int64_val1 = __ CreateLoad(tmp1); | |
+ __ CreateBr(done); | |
+  __ SetInsertPoint(out_of_obj); | 
+  scale = __ getInt64(8); | 
+  offset = __ getInt64(JSObject::kHeaderSize - kPointerSize); | 
+  llvm::Value* v2 = LoadFieldOperand(object, JSObject::kPropertiesOffset); | 
+  // Negate the (negative) index so it becomes the out-of-object property | 
+  // index plus 1; it is the index, not the properties pointer, that must | 
+  // be negated here. | 
+  llvm::Value* index64 = __ CreateSExt(index, Types::i64); | 
+  llvm::Value* neg_index = __ CreateNeg(index64); | 
+  llvm::Value* mul1 = __ CreateMul(neg_index, scale); | 
+  llvm::Value* add1 = __ CreateAdd(mul1, offset); | 
+  llvm::Value* int_ptr1 = __ CreateIntToPtr(v2, Types::ptr_i8); | 
+  llvm::Value* v3 = __ CreateGEP(int_ptr1, add1); | 
+  llvm::Value* tmp2 = __ CreateBitCast(v3, Types::ptr_i64); | 
+  llvm::Value* int64_val2 = __ CreateLoad(tmp2); | 
+ __ CreateBr(done); | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i64, 2); | |
+ phi->addIncoming(int64_val1, done1); | |
+ phi->addIncoming(int64_val2, out_of_obj); | |
+ llvm::Value* phi_tagged = __ CreateIntToPtr(phi, Types::tagged); | |
+ instr->set_llvm_value(phi_tagged); | |
+} | |
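+ | 
+// The index is a Smi whose payload encodes the field location: after | 
+// dropping the low payload bit (the mutable-double flag handled by the | 
+// unimplemented deferred path) and untagging, a non-negative index selects | 
+// an in-object field at JSObject::kHeaderSize + 8 * index, and a negative | 
+// one selects a slot counted back from the end of the out-of-object | 
+// properties array. | 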
+ | |
+void LLVMChunkBuilder::DoLoadFunctionPrototype(HLoadFunctionPrototype* instr) { | |
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::Value* function = Use(instr->function()); | |
+ llvm::BasicBlock* equal = NewBlock("LoadFunctionPrototype Equal"); | |
+ llvm::BasicBlock* done = NewBlock("LoadFunctionPrototype Done"); | |
+ | |
+ // Get the prototype or initial map from the function. | |
+ llvm::Value* load_func = LoadFieldOperand(function, | |
+ JSFunction::kPrototypeOrInitialMapOffset); | |
+ | |
+ // Check that the function has a prototype or an initial map. | |
+ llvm::Value* cmp_root = CompareRoot(load_func, Heap::kTheHoleValueRootIndex); | |
+ DeoptimizeIf(cmp_root, Deoptimizer::kHole); | |
+ | |
+ // If the function does not have an initial map, we're done. | |
+ llvm::Value* result = LoadFieldOperand(load_func, HeapObject::kMapOffset); | |
+ llvm::Value* map = LoadFieldOperand(result, Map::kInstanceTypeOffset); | |
+ llvm::Value* map_type = __ getInt64(static_cast<int>(MAP_TYPE)); | |
+ llvm::Value* cmp_type = __ CreateICmpNE(map, map_type); | |
+ __ CreateCondBr(cmp_type, done, equal); | |
+ | |
+ __ SetInsertPoint(equal); | |
+ // Get the prototype from the initial map. | |
+ llvm::Value* get_prototype = LoadFieldOperand(load_func, | |
+ Map::kPrototypeOffset); | |
+ | |
+ __ CreateBr(done); | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i64, 2); | |
+ phi->addIncoming(load_func, insert); | |
+ phi->addIncoming(get_prototype, equal); | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { | |
+  // TODO: not tested; see string-base64.js, base64ToString function. | 
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* global_object = Use(instr->global_object()); | |
+ llvm::Value* name = MoveHeapObject(instr->name()); | |
+ | |
+ AllowDeferredHandleDereference vector_structure_check; | |
+ Handle<TypeFeedbackVector> feedback_vector = instr->feedback_vector(); | |
+ llvm::Value* vector = MoveHeapObject(feedback_vector); | |
+ FeedbackVectorSlot instr_slot = instr->slot(); | |
+ int index = feedback_vector->GetIndex(instr_slot); | |
+ llvm::Value* slot = __ getInt64(index); | |
+ | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ Handle<Code> ic = | |
+ CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(), | |
+ SLOPPY, PREMONOMORPHIC).code(); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(global_object); | |
+ params.push_back(name); | |
+ params.push_back(vector); | |
+ params.push_back(slot); | |
+ auto result = CallCode(ic, llvm::CallingConv::X86_64_V8_S9, params); | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { | |
+ if (instr->is_fixed_typed_array()) { | |
+ DoLoadKeyedExternalArray(instr); | |
+ } else if (instr->representation().IsDouble()) { | |
+ DoLoadKeyedFixedDoubleArray(instr); | |
+ } else { | |
+ DoLoadKeyedFixedArray(instr); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadKeyedExternalArray(HLoadKeyed* instr) { | |
+  // TODO: not tested; see string-validate-input.js in doTest. | 
+  // TODO: compare the generated asm while testing. | 
+ HValue* key = instr->key(); | |
+ ElementsKind kind = instr->elements_kind(); | |
+ int32_t base_offset = instr->base_offset(); | |
+ | |
+ if (kPointerSize == kInt32Size && !key->IsConstant()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ llvm::Value* elements = Use(instr->elements()); | |
+ llvm::Value* address = BuildFastArrayOperand(key, elements, | |
+ kind, base_offset); | |
+ if (kind == FLOAT32_ELEMENTS) { | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_float32); | |
+ auto load = __ CreateLoad(casted_address); | |
+ auto result = __ CreateFPExt(load, Types::float64); | |
+ instr->set_llvm_value(result); | |
+ } else if (kind == FLOAT64_ELEMENTS) { | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_float64); | |
+ auto load = __ CreateLoad(casted_address); | |
+ instr->set_llvm_value(load); | |
+ } else { | |
+ // TODO(llvm): DRY: hoist the common part. | |
+ switch (kind) { | |
+ case INT8_ELEMENTS: { | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_i8); | |
+ auto load = __ CreateLoad(casted_address); | |
+ llvm::Value* result = __ CreateSExt(load, Types::i32); | |
+ instr->set_llvm_value(result); | |
+ break; | |
+ } | |
+ case UINT8_ELEMENTS: | |
+ case UINT8_CLAMPED_ELEMENTS:{ | |
+ //movzxbl(result, operand) | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_i8); | |
+ auto load = __ CreateLoad(casted_address); | |
+ llvm::Value* result = __ CreateZExt(load, Types::i32); | |
+ instr->set_llvm_value(result); | |
+ break; | |
+ } | |
+ case INT16_ELEMENTS: { | |
+ auto casted_address = __ CreateBitOrPointerCast(address, Types::ptr_i16); | |
+ auto load = __ CreateLoad(casted_address); | |
+ auto extended = __ CreateSExt(load, Types::i32); | |
+ instr->set_llvm_value(extended); | |
+ break; | |
+ } | |
+ case UINT16_ELEMENTS: { | |
+ auto casted_address = __ CreateBitOrPointerCast(address, Types::ptr_i16); | |
+ auto load = __ CreateLoad(casted_address); | |
+ auto extended = __ CreateZExt(load, Types::i32); | |
+ instr->set_llvm_value(extended); | |
+ break; | |
+ } | |
+ case INT32_ELEMENTS: { | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_i32); | |
+ auto load = __ CreateLoad(casted_address); | |
+ instr->set_llvm_value(load); | |
+ break; | |
+ } | |
+ case UINT32_ELEMENTS: { | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_i32); | |
+ auto load = __ CreateLoad(casted_address); | |
+ instr->set_llvm_value(load); | |
+ if (!instr->CheckFlag(HInstruction::kUint32)) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ break; | |
+ } | |
+ case FLOAT32_ELEMENTS: | |
+ case FLOAT64_ELEMENTS: | |
+ case FAST_ELEMENTS: | |
+ case FAST_SMI_ELEMENTS: | |
+ case FAST_DOUBLE_ELEMENTS: | |
+ case FAST_HOLEY_ELEMENTS: | |
+ case FAST_HOLEY_SMI_ELEMENTS: | |
+ case FAST_HOLEY_DOUBLE_ELEMENTS: | |
+ case DICTIONARY_ELEMENTS: | |
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS: | |
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: { | |
+ UNREACHABLE(); | |
+ break; | |
+ } | |
+ } | |
+ } | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::BuildFastArrayOperand(HValue* key, | |
+ llvm::Value* elements, | |
+ ElementsKind elements_kind, | |
+ uint32_t inst_offset) { | |
+ llvm::Value* address = nullptr; | |
+ int shift_size = ElementsKindToShiftSize(elements_kind); | |
+ if (key->IsConstant()) { | |
+ uint32_t const_val = (HConstant::cast(key))->Integer32Value(); | |
+ address = ConstructAddress(elements, (const_val << shift_size) + inst_offset); | |
+ } else { | |
+ llvm::Value* lkey = Use(key); | |
+ llvm::Value* scale = nullptr; | |
+ llvm::Value* offset = nullptr; | |
+ int scale_factor; | |
+ switch (shift_size) { | |
+ case 0: | |
+ scale_factor = 1; | |
+ break; | |
+ case 1: | |
+ scale_factor = 2; | |
+ break; | |
+ case 2: | |
+ scale_factor = 4; | |
+ break; | |
+ case 3: | |
+ scale_factor = 8; | |
+ break; | |
+ default: | |
+ UNIMPLEMENTED(); | |
+ } | |
+ if (key->representation().IsInteger32()) { | |
+ scale = __ getInt32(scale_factor); | |
+ offset = __ getInt32(inst_offset); | |
+ } else { | |
+ scale = __ getInt64(scale_factor); | |
+ offset = __ getInt64(inst_offset); | |
+ } | |
+ llvm::Value* mul = __ CreateMul(lkey, scale); | |
+ llvm::Value* add = __ CreateAdd(mul, offset); | |
+ llvm::Value* int_ptr = __ CreateIntToPtr(elements, Types::ptr_i8); | |
+ address = __ CreateGEP(int_ptr, add); | |
+ } | |
+ return address; | |
+} | |
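+ | 
+// The address computed above is elements + key * 2^shift + offset, the | 
+// LLVM analogue of an x64 scaled memory operand. E.g. FAST_ELEMENTS on | 
+// x64 has shift size 3, so element i lives at elements + 8 * i + offset; | 
+// a constant key folds everything into a single displacement. | 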
+ | |
+void LLVMChunkBuilder::DoLoadKeyedFixedDoubleArray(HLoadKeyed* instr) { | |
+ HValue* key = instr->key(); | |
+ uint32_t inst_offset = instr->base_offset(); | |
+ if (kPointerSize == kInt32Size && !key->IsConstant() && | |
+ instr->IsDehoisted()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ if (instr->RequiresHoleCheck()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ llvm::Value* address = BuildFastArrayOperand(key, Use(instr->elements()), | |
+ FAST_DOUBLE_ELEMENTS, inst_offset); | |
+ auto casted_address = __ CreateBitCast(address, Types::ptr_float64); | |
+ llvm::Value* load = __ CreateLoad(casted_address); | |
+ instr->set_llvm_value(load); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadKeyedFixedArray(HLoadKeyed* instr) { | |
+ HValue* key = instr->key(); | |
+ Representation representation = instr->representation(); | |
+ bool requires_hole_check = instr->RequiresHoleCheck(); | |
+ uint32_t inst_offset = instr->base_offset(); | |
+ if (kPointerSize == kInt32Size && !key->IsConstant() && | |
+ instr->IsDehoisted()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ if (representation.IsInteger32() && SmiValuesAre32Bits() && | |
+ instr->elements_kind() == FAST_SMI_ELEMENTS) { | |
+ DCHECK(!requires_hole_check); | |
+ if (FLAG_debug_code) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ DCHECK(kSmiTagSize + kSmiShiftSize == 32); | |
+ inst_offset += kPointerSize / 2; | |
+ } | |
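+  // FAST_DOUBLE_ELEMENTS is passed below purely for its shift size | 
+  // (3 == kPointerSizeLog2 on x64), which matches tagged elements too. | 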
+ llvm::Value* address = BuildFastArrayOperand(key, Use(instr->elements()), | |
+ FAST_DOUBLE_ELEMENTS, inst_offset); | |
+ llvm::Value* casted_address = nullptr; | |
+ auto pointer_type = GetLLVMType(instr->representation())->getPointerTo(); | |
+ casted_address = __ CreateBitCast(address, pointer_type); | |
+ llvm::Value* load = __ CreateLoad(casted_address); | |
+ | |
+ if (requires_hole_check) { | |
+ if (IsFastSmiElementsKind(instr->elements_kind())) { | |
+ bool check_non_smi = true; | |
+ llvm::Value* cmp = SmiCheck(load, check_non_smi); | |
+ DeoptimizeIf(cmp, Deoptimizer::kNotASmi); | |
+ } else { | |
+ // FIXME(access-nsieve): not tested | |
+ llvm::Value* cmp = CompareRoot(load, Heap::kTheHoleValueRootIndex); | |
+ DeoptimizeIf(cmp, Deoptimizer::kHole); | |
+ } | |
+ } else if (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { | |
+ DCHECK(instr->elements_kind() == FAST_HOLEY_ELEMENTS); | |
+ llvm::BasicBlock* merge = NewBlock("DoLoadKeyedFixedArray merge"); | |
+ llvm::BasicBlock* after_cmp = NewBlock("DoLoadKeyedFixedArray after cmp"); | |
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::Value* cmp = CompareRoot(load, Heap::kTheHoleValueRootIndex, | |
+ llvm::CmpInst::ICMP_NE); | |
+ __ CreateCondBr(cmp, merge, after_cmp); | |
+ | |
+ __ SetInsertPoint(after_cmp); | |
+ llvm::BasicBlock* check_info = NewBlock("DoLoadKeyedFixedArray check_info"); | |
+ if (info()->IsStub()) { | |
+ // A stub can safely convert the hole to undefined only if the array | |
+ // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise | |
+ // it needs to bail out. | |
+ | |
+      // Should branch to the check_info block here, e.g.: | 
+      // DeoptimizeIf(cond, check_info); | 
+ UNIMPLEMENTED(); | |
+ } else { | |
+ __ CreateBr(check_info); | |
+ } | |
+ __ SetInsertPoint(check_info); | |
+ auto undefined = MoveHeapObject(isolate()->factory()->undefined_value()); | |
+ __ CreateBr(merge); | |
+ | |
+ __ SetInsertPoint(merge); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(undefined, check_info); | |
+ phi->addIncoming(load, insert); | |
+ instr->set_llvm_value(phi); | |
+ return; | |
+ } | |
+ instr->set_llvm_value(load); | |
+} | |
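+ | 
+// With 32-bit Smis (SmiValuesAre32Bits) the integer payload sits in the | 
+// upper half of the 8-byte slot, which is why the Integer32 fast path | 
+// above bumps the offset by kPointerSize / 2 and loads only 4 bytes. | 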
+ | |
+void LLVMChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { | |
+ llvm::Value* obj = Use(instr->object()); | |
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* key = Use(instr->key()); | |
+ | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(obj); | |
+ params.push_back(key); | |
+ if (instr->HasVectorAndSlot()) { | |
+ AllowDeferredHandleDereference vector_structure_check; | |
+ Handle<TypeFeedbackVector> handle_vector = instr->feedback_vector(); | |
+ llvm::Value* vector = MoveHeapObject(handle_vector); | |
+ FeedbackVectorSlot feedback_slot = instr->slot(); | |
+ int index = handle_vector->GetIndex(feedback_slot); | |
+ Smi* smi = Smi::FromInt(index); | |
+ llvm::Value* slot = ValueFromSmi(smi); | |
+ params.push_back(vector); | |
+ params.push_back(slot); | |
+ } | |
+ | |
+ AllowHandleAllocation allow_handles; | |
+ Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode( | |
+ isolate(), instr->language_mode(), | |
+ instr->initialization_state()).code(); | |
+ | |
+ llvm::Value* result = nullptr; | |
+ if (instr->HasVectorAndSlot()) { | |
+ result = CallCode(ic, llvm::CallingConv::X86_64_V8_S9, params); | |
+ } else { | |
+ result = CallCode(ic, llvm::CallingConv::X86_64_V8_S5, params); | |
+ } | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { | |
+ HObjectAccess access = instr->access(); | |
+ int offset = access.offset(); | |
+ if (access.IsExternalMemory()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ if (instr->representation().IsDouble()) { | |
+ llvm::Value* address = FieldOperand(Use(instr->object()), offset); | |
+ llvm::Value* cast_double = __ CreateBitCast(address, Types::ptr_float64); | |
+ llvm::Value* result = __ CreateLoad(cast_double); | |
+ instr->set_llvm_value(result); | |
+ return; | |
+ } | |
+ llvm::Value* obj_arg = Use(instr->object()); | |
+ if (!access.IsInobject()) { | |
+ obj_arg = LoadFieldOperand(obj_arg, JSObject::kPropertiesOffset); | |
+ } | |
+ | |
+ Representation representation = access.representation(); | |
+ if (representation.IsSmi() && SmiValuesAre32Bits() && | |
+ instr->representation().IsInteger32()) { | |
+    if (FLAG_debug_code) { | 
+ UNIMPLEMENTED(); | |
+ // TODO(llvm): | |
+ // Load(scratch, FieldOperand(object, offset), representation); | |
+ // AssertSmi(scratch); | |
+ } | |
+ STATIC_ASSERT(kSmiTag == 0); | |
+ DCHECK(kSmiTagSize + kSmiShiftSize == 32); | |
+ offset += kPointerSize / 2; | |
+ representation = Representation::Integer32(); | |
+ } | |
+ | |
+ llvm::Value* obj = FieldOperand(obj_arg, offset); | |
+ if (instr->representation().IsInteger32()) { | |
+ llvm::Value* casted_address = __ CreateBitCast(obj, Types::ptr_i32); | |
+ llvm::Value* res = __ CreateLoad(casted_address); | |
+ instr->set_llvm_value(res); | |
+ } else { | |
+ DCHECK_EQ(GetLLVMType(instr->representation()), Types::tagged); | |
+ llvm::Value* casted_address = __ CreateBitCast(obj, Types::ptr_tagged); | |
+ llvm::Value* res = __ CreateLoad(casted_address); | |
+ instr->set_llvm_value(res); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { | |
+ DCHECK(instr->object()->representation().IsTagged()); | |
+ | |
+ llvm::Value* obj = Use(instr->object()); | |
+ llvm::Value* context = Use(instr->context()); | |
+ | |
+ Handle<Object> handle_name = instr->name(); | |
+ llvm::Value* name = MoveHeapObject(handle_name); | |
+ | |
+ AllowDeferredHandleDereference vector_structure_check; | |
+ Handle<TypeFeedbackVector> feedback_vector = instr->feedback_vector(); | |
+ llvm::Value* vector = MoveHeapObject(feedback_vector); | |
+ FeedbackVectorSlot instr_slot = instr->slot(); | |
+ int index = feedback_vector->GetIndex(instr_slot); | |
+ Smi* smi = Smi::FromInt(index); | |
+ llvm::Value* slot = ValueFromSmi(smi); | |
+ | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ | |
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode( | |
+ isolate(), NOT_INSIDE_TYPEOF, | |
+ instr->language_mode(), | |
+ instr->initialization_state()).code(); | |
+ | |
+ // TODO(llvm): RecordSafepointWithLazyDeopt (and reloc info) + MarkAsCall | |
+ | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(obj); | |
+ params.push_back(name); | |
+ params.push_back(vector); | |
+ params.push_back(slot); | |
+ | |
+ auto result = CallCode(ic, llvm::CallingConv::X86_64_V8_S9, params); | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadRoot(HLoadRoot* instr) { | |
+ llvm::Value* load_r = LoadRoot(instr->index()); | |
+ instr->set_llvm_value(load_r); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) { | |
+ llvm::Value* val = EnumLength(Use(instr->value())); | |
+ llvm::Value* smi_tmp_val = __ CreateZExt(val, Types::i64); | |
+ llvm::Value* smi_val = __ CreateShl(smi_tmp_val, kSmiShift); | |
+ instr->set_llvm_value(smi_val); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) { | |
+ llvm::Function* floor_intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::floor, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->value())); | |
+ llvm::Value* floor = __ CreateCall(floor_intrinsic, params); | |
+ llvm::Value* casted_int = __ CreateFPToSI(floor, Types::i64); | |
+ // FIXME: Figure out why we need this step. Fix for bitops-nsieve-bits | |
+ auto result = __ CreateTruncOrBitCast(casted_int, Types::i32); | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMathMinMax(HMathMinMax* instr) { | |
+ llvm::Value* left = Use(instr->left()); | |
+ llvm::Value* right = Use(instr->right()); | |
+ llvm::Value* left_near; | |
+ llvm::Value* cmpl_result; | |
+ llvm::BasicBlock* near = NewBlock("NEAR"); | |
+ llvm::BasicBlock* return_block = NewBlock("MIN MAX RETURN"); | |
+ HMathMinMax::Operation operation = instr->operation(); | |
+ llvm::BasicBlock* insert_block = __ GetInsertBlock(); | |
+ bool cond_for_min = (operation == HMathMinMax::kMathMin); | |
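+ // When the comparison below holds, |left| already holds the result and | 
+ // control transfers straight to the join block; otherwise the result is | 
+ // the right operand, carried over via |left_near|. | 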
+ | |
+ if (instr->representation().IsSmiOrInteger32()) { | |
+ if (instr->right()->IsConstant()) { | |
+ DCHECK(SmiValuesAre32Bits() | |
+ ? !instr->representation().IsSmi() | |
+ : SmiValuesAre31Bits()); | |
+ int32_t right_value = (HConstant::cast(instr->right()))->Integer32Value(); | |
+ llvm::Value* right_imm = __ getInt32(right_value); | |
+ | |
+ if (cond_for_min) { | |
+ cmpl_result = __ CreateICmpSLT(left, right_imm); | |
+ } else { | |
+ cmpl_result = __ CreateICmpSGT(left, right_imm); | |
+ } | |
+ __ CreateCondBr(cmpl_result, return_block, near); | |
+ __ SetInsertPoint(near); | |
+ left_near = right_imm; | |
+ } else { | |
+ if (cond_for_min) { | |
+ cmpl_result = __ CreateICmpSLT(left, right); | |
+ } else { | |
+ cmpl_result = __ CreateICmpSGT(left, right); | |
+ } | |
+ __ CreateCondBr(cmpl_result, return_block, near); | |
+ __ SetInsertPoint(near); | |
+ left_near = right; | |
+ } | |
+ __ CreateBr(return_block); | |
+ __ SetInsertPoint(return_block); | |
+ | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i32, 2); | |
+ phi->addIncoming(left_near, near); | |
+ phi->addIncoming(left, insert_block); | |
+ instr->set_llvm_value(phi); | |
+ } else { | |
+ if (cond_for_min) { | |
+ llvm::Function* fmin_intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::minnum, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(left); | |
+ params.push_back(right); | |
+ llvm::Value* fmin = __ CreateCall(fmin_intrinsic, params); | |
+ instr->set_llvm_value(fmin); | |
+ } else { | |
+ llvm::Function* fmax_intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::maxnum, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(left); | |
+ params.push_back(right); | |
+ llvm::Value* fmax = __ CreateCall(fmax_intrinsic, params); | |
+ instr->set_llvm_value(fmax); | |
+ } | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMod(HMod* instr) { | |
+ if (instr->representation().IsSmiOrInteger32()) { | |
+ if (instr->RightIsPowerOf2()) { | |
+ DoModByPowerOf2I(instr); | |
+ } else if (instr->right()->IsConstant()) { | |
+ DoModByConstI(instr); | |
+ } else { | |
+ DoModI(instr); | |
+ } | |
+ } else if (instr->representation().IsDouble()) { | |
+ llvm::Value* left = Use(instr->left()); | |
+ llvm::Value* right = Use(instr->right()); | |
+ llvm::Value* result = __ CreateFRem(left, right); | |
+ instr->set_llvm_value(result); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoModByConstI(HMod* instr) { | |
+ int32_t divisor_val = (HConstant::cast(instr->right()))->Integer32Value(); | |
+ if (divisor_val == 0) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ auto left = Use(instr->left()); | |
+ auto right = __ getInt32(divisor_val); | |
+ auto result = __ CreateSRem(left, right); | |
+ | |
+ // Check for negative zero. | |
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
+ llvm::BasicBlock* remainder_not_zero = NewBlock("DoModByConstI" | |
+ " remainder_not_zero"); | |
+ llvm::BasicBlock* remainder_zero = NewBlock("DoModByConstI" | |
+ " remainder_zero"); | |
+ llvm::Value* zero = __ getInt32(0); | |
+ llvm::Value* cmp = __ CreateICmpNE(result, zero); | |
+ __ CreateCondBr(cmp, remainder_not_zero, remainder_zero); | |
+ | |
+ __ SetInsertPoint(remainder_zero); | |
+ llvm::Value* cmp_dividend = __ CreateICmpSLT(left, zero); | 
+ DeoptimizeIf(cmp_dividend, Deoptimizer::kMinusZero, false, | 
+ remainder_not_zero); | 
+ __ SetInsertPoint(remainder_not_zero); | |
+ } | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoModByPowerOf2I(HMod* instr) { | |
+ llvm::BasicBlock* is_not_negative = NewBlock("DoModByPowerOf2I" | 
+ " dividend is not negative"); | 
+ llvm::BasicBlock* negative = NewBlock("DoModByPowerOf2I negative"); | |
+ llvm::BasicBlock* done = NewBlock("DoModByPowerOf2I done"); | |
+ | |
+ llvm::Value* dividend = Use(instr->left()); | |
+ int32_t divisor = instr->right()->GetInteger32Constant(); | |
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | |
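+ // mask == |divisor| - 1, so dividend & mask yields the modulus of a | 
+ // non-negative dividend; a negative dividend is negated, masked, and | 
+ // negated back to preserve its sign. | 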
+ llvm::Value* l_mask = __ getInt32(mask); | |
+ bool can_be_negative = instr->CheckFlag(HValue::kLeftCanBeNegative); | 
+ int phi_count = 1; | 
+ llvm::Value* div1 = nullptr; | 
+ if (can_be_negative) { | 
+ phi_count++; | |
+ llvm::Value* zero = __ getInt32(0); | |
+ llvm::Value* cmp = __ CreateICmpSGT(dividend, zero); | |
+ __ CreateCondBr(cmp, is_not_negative, negative); | |
+ | |
+ __ SetInsertPoint(negative); | |
+ llvm::Value* neg_dividend = __ CreateNeg(dividend); | 
+ llvm::Value* temp = __ CreateAnd(neg_dividend, l_mask); | 
+ div1 = __ CreateNeg(temp); | |
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
+ llvm::Value* cmp = __ CreateICmpEQ(div1, zero); | |
+ DeoptimizeIf(cmp, Deoptimizer::kMinusZero); | |
+ } | |
+ __ CreateBr(done); | |
+ } else { | 
+ __ CreateBr(is_not_negative); | |
+ } | |
+ | |
+ __ SetInsertPoint(is_not_negative); | |
+ llvm::Value* div2 = __ CreateAnd(dividend, l_mask); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i32, phi_count); | |
+ if (can_be_negative) { | 
+ phi->addIncoming(div1, negative); | 
+ } | 
+ phi->addIncoming(div2, is_not_negative); | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoModI(HMod* instr) { | |
+ llvm::Value* left = Use(instr->left()); | |
+ llvm::Value* right = Use(instr->right()); | |
+ llvm::Value* zero = __ getInt32(0); | |
+ llvm::BasicBlock* done = NewBlock("DoModI done"); | |
+ llvm::Value* result = nullptr; | |
+ llvm::Value* div_res = nullptr; | |
+ if (instr->CheckFlag(HValue::kCanBeDivByZero)) { | |
+ llvm::Value* is_zero = __ CreateICmpEQ(right, zero); | |
+ DeoptimizeIf(is_zero, Deoptimizer::kDivisionByZero); | |
+ } | |
+ | |
+ int phi_in = 1; | |
+ llvm::BasicBlock* after_cmp_one = nullptr; | |
+ if (instr->CheckFlag(HValue::kCanOverflow)) { | |
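+ // kMinInt srem -1 overflows in hardware; the result is defined to be 0, | 
+ // unless a -0 result matters, in which case we deoptimize instead. | 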
+ after_cmp_one = NewBlock("DoModI after compare minus one"); | |
+ llvm::BasicBlock* possible_overflow = NewBlock("DoModI possible_overflow"); | |
+ llvm::BasicBlock* no_overflow_possible = NewBlock("DoModI " | |
+ "no_overflow_possible"); | |
+ llvm::Value* min_int = __ getInt32(kMinInt); | |
+ llvm::Value* left_is_min_int = __ CreateICmpEQ(left, min_int); | |
+ __ CreateCondBr(left_is_min_int, possible_overflow, no_overflow_possible); | |
+ | |
+ __ SetInsertPoint(possible_overflow); | |
+ llvm::Value* minus_one = __ getInt32(-1); | |
+ llvm::Value* right_is_minus_one = __ CreateICmpEQ(right, minus_one); | |
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
+ DeoptimizeIf(right_is_minus_one, Deoptimizer::kMinusZero); | |
+ __ CreateBr(no_overflow_possible); | |
+ } else { | |
+ phi_in++; | |
+ __ CreateCondBr(right_is_minus_one, after_cmp_one, no_overflow_possible); | |
+ __ SetInsertPoint(after_cmp_one); | |
+ result = zero; | |
+ __ CreateBr(done); | |
+ } | |
+ __ SetInsertPoint(no_overflow_possible); | |
+ } | |
+ | |
+ llvm::BasicBlock* negative = nullptr; | |
+ llvm::BasicBlock* positive = nullptr; | |
+ | |
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
+ phi_in++; | |
+ negative = NewBlock("DoModI left is negative"); | |
+ positive = NewBlock("DoModI left is positive"); | |
+ llvm::Value* cmp_sign = __ CreateICmpSGT(left, zero); | |
+ __ CreateCondBr(cmp_sign, positive, negative); | |
+ | |
+ __ SetInsertPoint(negative); | |
+ div_res = __ CreateSRem(left, right); | |
+ llvm::Value* cmp_zero = __ CreateICmpEQ(div_res, zero); | |
+ DeoptimizeIf(cmp_zero, Deoptimizer::kMinusZero); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(positive); | |
+ } | |
+ | |
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::Value* div = __ CreateSRem(left, right); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i32, phi_in); | |
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
+ phi->addIncoming(div_res, negative); | |
+ phi->addIncoming(div, positive); | |
+ } else { | |
+ phi->addIncoming(div, insert); | |
+ } | |
+ if (instr->CheckFlag(HValue::kCanOverflow) && | |
+ !instr->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
+ phi->addIncoming(result, after_cmp_one); | |
+ } | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMul(HMul* instr) { | |
+ if (instr->representation().IsSmiOrInteger32()) { | 
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* llvm_left = Use(left); | |
+ llvm::Value* llvm_right = Use(right); | |
+ bool can_overflow = instr->CheckFlag(HValue::kCanOverflow); | |
+ // TODO(llvm): use raw mul, not the intrinsic, if (!can_overflow). | |
+ llvm::Value* overflow = nullptr; | |
+ if (instr->representation().IsSmi()) { | |
+ // FIXME (llvm): | |
+ // 1) Minus Zero?? Important | |
+ // 2) see if we can refactor using SmiToInteger32() or the like | |
+ auto type = Types::i64; | |
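+ // Untag the left operand (the smi payload sits in the upper 32 bits); | 
+ // untagged_left * tagged_right then yields a correctly tagged smi. | 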
+ llvm::Value* shift = __ CreateAShr(llvm_left, 32); | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::smul_with_overflow, type); | |
+ | |
+ llvm::Value* params[] = { shift, llvm_right }; | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ | |
+ llvm::Value* mul = __ CreateExtractValue(call, 0); | |
+ overflow = __ CreateExtractValue(call, 1); | |
+ instr->set_llvm_value(mul); | |
+ } else { | |
+ auto type = Types::i32; | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::smul_with_overflow, type); | |
+ | |
+ llvm::Value* params[] = { llvm_left, llvm_right }; | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ | |
+ llvm::Value* mul = __ CreateExtractValue(call, 0); | |
+ overflow = __ CreateExtractValue(call, 1); | |
+ instr->set_llvm_value(mul); | |
+ } | |
+ if (can_overflow) DeoptimizeIf(overflow, Deoptimizer::kOverflow); | |
+ } else if (instr->representation().IsDouble()) { | |
+ DCHECK(instr->left()->representation().IsDouble()); | |
+ DCHECK(instr->right()->representation().IsDouble()); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* fMul = __ CreateFMul(Use(left), Use(right), ""); | |
+ instr->set_llvm_value(fMul); | |
+ } else { | 
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoOsrEntry(HOsrEntry* instr) { | |
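+ // Release the unoptimized frame's spill slots by adjusting rsp with | 
+ // inline asm, since there is no direct way to change the stack pointer | 
+ // from LLVM IR. | 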
+ int arg_count = graph()->osr()->UnoptimizedFrameSlots(); | |
+ std::string arg_offset = std::to_string(arg_count * kPointerSize); | |
+ std::string asm_string1 = "add $$"; | |
+ std::string asm_string2 = ", %rsp"; | |
+ std::string final_string = asm_string1 + arg_offset + asm_string2; | 
+ auto inl_asm_f_type = llvm::FunctionType::get(__ getVoidTy(), false); | |
+ llvm::InlineAsm* inline_asm = llvm::InlineAsm::get( | |
+ inl_asm_f_type, final_string, "~{dirflag},~{fpsr},~{flags}", true); | 
+ __ CreateCall(inline_asm); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoPower(HPower* instr) { | |
+ Representation exponent_type = instr->right()->representation(); | |
+ | |
+ if (exponent_type.IsSmi()) { | |
+ UNIMPLEMENTED(); | |
+ } else if (exponent_type.IsTagged()) { | |
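+ // A tagged exponent must be a smi or a heap number; otherwise deopt. | 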
+ llvm::Value* tagged_exponent = Use(instr->right()); | |
+ llvm::Value* is_smi = SmiCheck(tagged_exponent, false); | |
+ llvm::BasicBlock* deopt = NewBlock("DoPower CheckObjType"); | |
+ llvm::BasicBlock* no_deopt = NewBlock("DoPower No Deoptimize"); | |
+ __ CreateCondBr(is_smi, no_deopt, deopt); | |
+ __ SetInsertPoint(deopt); | |
+ llvm::Value* cmp = CmpObjectType(tagged_exponent, | |
+ HEAP_NUMBER_TYPE, llvm::CmpInst::ICMP_NE); | |
+ DeoptimizeIf(cmp, Deoptimizer::kNotAHeapNumber, false, no_deopt); | |
+ | |
+ __ SetInsertPoint(no_deopt); | |
+ MathPowStub stub(isolate(), MathPowStub::TAGGED); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params; | |
+ for (int i = 0; i < instr->OperandCount(); i++) | |
+ params.push_back(Use(instr->OperandAt(i))); | |
+ llvm::Value* call = CallAddress(code->entry(), | |
+ llvm::CallingConv::X86_64_V8_S2, | |
+ params, Types::float64); | |
+ instr->set_llvm_value(call); | |
+ | |
+ } else if (exponent_type.IsInteger32()) { | |
+ MathPowStub stub(isolate(), MathPowStub::INTEGER); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params; | |
+ for (int i = 0; i < instr->OperandCount(); i++) | |
+ params.push_back(Use(instr->OperandAt(i))); | |
+ llvm::Value* call = CallAddress(code->entry(), | |
+ llvm::CallingConv::X86_64_V8_S2, | |
+ params, Types::float64); | |
+ instr->set_llvm_value(call); | |
+ } else { | |
+ MathPowStub stub(isolate(), MathPowStub::DOUBLE); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params; | |
+ for (int i = 0; i < instr->OperandCount(); i++) | |
+ params.push_back(Use(instr->OperandAt(i))); | |
+ llvm::Value* call = CallAddress(code->entry(), | |
+ llvm::CallingConv::X86_64_V8_S2, | |
+ params, Types::float64); | |
+ instr->set_llvm_value(call); | |
+ } | |
+} | |
+ | |
+llvm::Value* LLVMChunkBuilder::RegExpLiteralSlow(HValue* instr, | |
+ llvm::Value* phi) { | |
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; | |
+ | |
+ Smi* smi_size = Smi::FromInt(size); | |
+ DCHECK(pending_pushed_args_.is_empty()); | |
+ pending_pushed_args_.Add(ValueFromSmi(smi_size), info()->zone()); | |
+ pending_pushed_args_.Add(phi, info()->zone()); | |
+ llvm::Value* call_result = CallRuntimeViaId(Runtime::kAllocateInNewSpace); | |
+ pending_pushed_args_.Clear(); | |
+ return call_result; | |
+} | |
+ | |
+void LLVMChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { | |
+ // TODO(llvm): not tested (string-validate-input.js in doTest). | 
+ llvm::BasicBlock* materialized = NewBlock("DoRegExpLiteral materialized"); | |
+ llvm::BasicBlock* near = NewBlock("DoRegExpLiteral near"); | |
+ llvm::BasicBlock* input = __ GetInsertBlock(); | |
+ int literal_offset = | |
+ FixedArray::OffsetOfElementAt(instr->literal_index()); | |
+ llvm::Value* literals = MoveHeapObject(instr->literals()); | |
+ llvm::Value* field_literal = LoadFieldOperand(literals, literal_offset); | 
+ auto cmp_root = CompareRoot(fild_literal, Heap::kUndefinedValueRootIndex, | |
+ llvm::CmpInst::ICMP_NE); | |
+ __ CreateCondBr(cmp_root, materialized, near); | |
+ __ SetInsertPoint(near); | |
+ DCHECK(pending_pushed_args_.is_empty()); | |
+ // The runtime expects the literal index, not the byte offset. | 
+ Smi* index = Smi::FromInt(instr->literal_index()); | 
+ pending_pushed_args_.Add(literals, info()->zone()); | |
+ pending_pushed_args_.Add(ValueFromSmi(index), info()->zone()); | |
+ pending_pushed_args_.Add(MoveHeapObject(instr->pattern()), info()->zone()); | |
+ pending_pushed_args_.Add(MoveHeapObject(instr->flags()), info()->zone()); | |
+ llvm::Value* call_result = CallRuntimeViaId(Runtime::kMaterializeRegExpLiteral); | |
+ pending_pushed_args_.Clear(); | |
+ __ CreateBr(materialized); | |
+ | |
+ __ SetInsertPoint(materialized); | |
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; | |
+ llvm::Value* l_size = __ getInt32(size); | |
+ // TODO(llvm): implement Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); | 
+ // jmp(&allocated, Label::kNear); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(call_result, near); | |
+ phi->addIncoming(field_literal, input); | 
+ llvm::Value* (LLVMChunkBuilder::*fptr)(HValue*, llvm::Value*); | |
+ fptr = &LLVMChunkBuilder::RegExpLiteralSlow; | |
+ llvm::Value* value = Allocate(l_size, fptr, TAG_OBJECT, nullptr, phi); | |
+ | |
+ // Copy the contents of the boilerplate (phi) into the newly allocated | 
+ // object (value), two pointers per iteration, as the x64 codegen does. | 
+ llvm::Value* temp = nullptr; // rdx | 
+ llvm::Value* temp2 = nullptr; // rcx | 
+ for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { | 
+ temp = LoadFieldOperand(phi, i); | 
+ temp2 = LoadFieldOperand(phi, i + kPointerSize); | 
+ llvm::Value* address = __ CreateBitCast(FieldOperand(value, i), | 
+ Types::ptr_tagged); | 
+ __ CreateStore(temp, address); | 
+ llvm::Value* address2 = __ CreateBitCast(FieldOperand(value, i + kPointerSize), | 
+ Types::ptr_tagged); | 
+ __ CreateStore(temp2, address2); | 
+ } | 
+ if ((size % (2 * kPointerSize)) != 0) { | 
+ temp = LoadFieldOperand(phi, size - kPointerSize); | 
+ llvm::Value* address = __ CreateBitCast(FieldOperand(value, size - kPointerSize), | 
+ Types::ptr_tagged); | 
+ __ CreateStore(temp, address); | 
+ } | 
+ instr->set_llvm_value(value); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoRor(HRor* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoSar(HSar* instr) { | |
+ if (instr->representation().IsInteger32() || instr->representation().IsSmi()) { | 
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* AShr = __ CreateAShr(Use(left), Use(right), "Sar"); | |
+ instr->set_llvm_value(AShr); | |
+ } else { | 
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoShl(HShl* instr) { | |
+ if (instr->representation().IsInteger32() || instr->representation().IsSmi()) { | 
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* Shl = __ CreateShl(Use(left), Use(right), ""); | 
+ instr->set_llvm_value(Shl); | |
+ } else { | 
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoShr(HShr* instr) { | |
+ if (instr->representation().IsInteger32() || instr->representation().IsSmi()) { | 
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* LShr = __ CreateLShr(Use(left), Use(right), ""); | 
+ instr->set_llvm_value(LShr); | |
+ } else { | 
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { | |
+ // TODO(llvm): not tested. | 
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* value = Use(instr->value()); | |
+ int offset = Context::SlotOffset(instr->slot_index()); | |
+ | |
+ if (instr->RequiresHoleCheck()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ llvm::Value* target = ConstructAddress(context, offset); | |
+ llvm::Value* casted_address = nullptr; | |
+ | |
+ if (instr->value()->representation().IsTagged()) | |
+ casted_address = __ CreateBitCast(target, Types::ptr_tagged); | |
+ else | |
+ casted_address = __ CreateBitCast(target, Types::ptr_i64); | |
+ | |
+ __ CreateStore(value, casted_address); | |
+ if (instr->NeedsWriteBarrier()) { | |
+ enum SmiCheck check_needed = instr->value()->type().IsHeapObject() | 
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 
+ RecordWriteField(context, | 
+ value, | 
+ offset + kHeapObjectTag, | 
+ check_needed, | |
+ kPointersToHereMaybeInteresting, | |
+ EMIT_REMEMBERED_SET); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { | |
+ if (instr->is_fixed_typed_array()) { | |
+ DoStoreKeyedExternalArray(instr); | |
+ } else if (instr->value()->representation().IsDouble()) { | |
+ DoStoreKeyedFixedDoubleArray(instr); | |
+ } else { | |
+ DoStoreKeyedFixedArray(instr); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreKeyedExternalArray(HStoreKeyed* instr) { | |
+ // TODO(llvm): not tested (string-validate-input.js in doTest). | 
+ ElementsKind elements_kind = instr->elements_kind(); | |
+ uint32_t offset = instr->base_offset(); | |
+ llvm::Value* casted_address = nullptr; | |
+ llvm::Value* store = nullptr; | |
+ HValue* key = instr->key(); | |
+ | |
+ if (kPointerSize == kInt32Size && !key->IsConstant()) { | |
+ Representation key_representation = | |
+ instr->key()->representation(); | |
+ if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) { | |
+ UNIMPLEMENTED(); | |
+ } else if (instr->IsDehoisted()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ } | |
+ llvm::Value* address = BuildFastArrayOperand(key, Use(instr->elements()), | |
+ elements_kind, offset); | |
+ if (elements_kind == FLOAT32_ELEMENTS) { | |
+ casted_address = __ CreateBitCast(address, Types::ptr_float32); | |
+ auto result = __ CreateFPTrunc(Use(instr->value()), Types::float32); | |
+ store = __ CreateStore(result, casted_address); | |
+ instr->set_llvm_value(store); | |
+ } else if (elements_kind == FLOAT64_ELEMENTS) { | |
+ casted_address = __ CreateBitCast(address, Types::ptr_float64); | |
+ store = __ CreateStore(Use(instr->value()), casted_address); | 
+ instr->set_llvm_value(store); | |
+ } else { | |
+ switch (elements_kind) { | |
+ case INT8_ELEMENTS: | |
+ case UINT8_ELEMENTS: | |
+ case UINT8_CLAMPED_ELEMENTS: { | |
+ casted_address = __ CreateBitCast(address, Types::ptr_i8); | |
+ auto result = __ CreateTruncOrBitCast(Use(instr->value()), Types::i8); | |
+ store = __ CreateStore(result, casted_address); | |
+ instr->set_llvm_value(store); | |
+ break; | |
+ } | |
+ case INT16_ELEMENTS: | |
+ case UINT16_ELEMENTS: { | |
+ casted_address = __ CreateBitCast(address, Types::ptr_i16); | 
+ auto result = __ CreateTruncOrBitCast(Use(instr->value()), Types::i16); | 
+ store = __ CreateStore(result, casted_address); | 
+ instr->set_llvm_value(store); | |
+ break; | |
+ } | |
+ case INT32_ELEMENTS: | |
+ case UINT32_ELEMENTS: | |
+ casted_address = __ CreateBitCast(address, Types::ptr_i32); | |
+ store = __ CreateStore(Use(instr->value()), casted_address); | |
+ instr->set_llvm_value(store); | |
+ break; | |
+ case FLOAT32_ELEMENTS: | |
+ case FLOAT64_ELEMENTS: | |
+ case FAST_ELEMENTS: | |
+ case FAST_SMI_ELEMENTS: | |
+ case FAST_DOUBLE_ELEMENTS: | |
+ case FAST_HOLEY_ELEMENTS: | |
+ case FAST_HOLEY_SMI_ELEMENTS: | |
+ case FAST_HOLEY_DOUBLE_ELEMENTS: | |
+ case DICTIONARY_ELEMENTS: | |
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS: | |
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: | |
+ UNREACHABLE(); | |
+ break; | |
+ } | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreKeyedFixedDoubleArray(HStoreKeyed* instr) { | |
+ HValue* key = instr->key(); | |
+ llvm::Value* value = Use(instr->value()); | |
+ uint32_t inst_offset = instr->base_offset(); | |
+ ElementsKind elements_kind = instr->elements_kind(); | |
+ if (kPointerSize == kInt32Size && !key->IsConstant() | |
+ && instr->IsDehoisted()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ llvm::Value* canonical_value = value; | |
+ if (instr->NeedsCanonicalization()) { | |
+ UNIMPLEMENTED(); | |
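+ // NOTE: the code below is unreachable until the UNIMPLEMENTED() above is | 
+ // removed; the llvm.canonicalize call is untested. | 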
+ llvm::Function* canonicalize = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::canonicalize, Types::float64); | |
+ llvm::Value* params[] = { value }; | |
+ canonical_value = __ CreateCall(canonicalize, params); | |
+ | |
+ } | |
+ llvm::Value* address = BuildFastArrayOperand(key, Use(instr->elements()), | |
+ elements_kind, inst_offset); | |
+ llvm::Value* casted_address = __ CreateBitCast(address, Types::ptr_float64); | |
+ llvm::Value* store = __ CreateStore(canonical_value, casted_address); | 
+ instr->set_llvm_value(store); | 
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreKeyedFixedArray(HStoreKeyed* instr) { | |
+ HValue* key = instr->key(); | |
+ Representation representation = instr->value()->representation(); | |
+ ElementsKind elements_kind = instr->elements_kind(); | |
+ uint32_t inst_offset = instr->base_offset(); | |
+ if (kPointerSize == kInt32Size && !key->IsConstant() && | |
+ instr->IsDehoisted()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ if (representation.IsInteger32() && SmiValuesAre32Bits()) { | |
+ DCHECK(instr->store_mode() == STORE_TO_INITIALIZED_ENTRY); | |
+ DCHECK(instr->elements_kind() == FAST_SMI_ELEMENTS); | |
+ if (FLAG_debug_code) { | |
+ UNIMPLEMENTED(); | |
+ } | |
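+ // Store the int32 payload directly into the upper half of the smi slot. | 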
+ inst_offset += kPointerSize / 2; | |
+ } | |
+ llvm::Value* address = BuildFastArrayOperand(key, Use(instr->elements()), | |
+ elements_kind, inst_offset); | |
+ HValue* hValue = instr->value(); | |
+ llvm::Value* store = nullptr; | |
+ llvm::Value* casted_address = nullptr; | |
+ | |
+ if (!hValue->IsConstant() || hValue->representation().IsSmi() || | |
+ hValue->representation().IsInteger32()) { | |
+ auto pointer_type = GetLLVMType(hValue->representation())->getPointerTo(); | |
+ casted_address = __ CreateBitOrPointerCast(address, pointer_type); | |
+ store = __ CreateStore(Use(hValue), casted_address); | |
+ } else { | |
+ DCHECK(hValue->IsConstant()); | |
+ HConstant* constant = HConstant::cast(instr->value()); | |
+ Handle<Object> handle_value = constant->handle(isolate()); | |
+ casted_address = __ CreateBitOrPointerCast(address, Types::ptr_tagged); | |
+ auto llvm_val = MoveHeapObject(handle_value); | |
+ store = __ CreateStore(llvm_val, casted_address); | |
+ } | |
+ instr->set_llvm_value(store); | |
+ if (instr->NeedsWriteBarrier()) { | |
+ llvm::Value* elements = Use(instr->elements()); | |
+ llvm::Value* value = Use(instr->value()); | |
+ enum SmiCheck check_needed = instr->value()->type().IsHeapObject() | |
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | |
+ // FIXME(llvm): kSaveFPRegs | |
+ RecordWrite(elements, | |
+ casted_address, | |
+ value, | |
+ instr->PointersToHereCheckForValue(), | |
+ EMIT_REMEMBERED_SET, | |
+ check_needed); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::RecordWriteField(llvm::Value* object, | |
+ llvm::Value* value, | |
+ int offset, | |
+ enum SmiCheck smi_check, | |
+ PointersToHereCheck ptr_check, | |
+ RememberedSetAction remembered_set) { | |
+ // FIXME(llvm): not sure this is right. | 
+ // TODO(llvm): find a way to test this function. | 
+ llvm::BasicBlock* done = NewBlock("RecordWriteField done"); | |
+ if (smi_check == INLINE_SMI_CHECK) { | |
+ llvm::BasicBlock* current_block = NewBlock("RecordWriteField Smi checked"); | |
+ // Skip barrier if writing a smi. | |
+ llvm::Value* smi_cond = SmiCheck(value, false); // JumpIfSmi(value, &done); | 
+ __ CreateCondBr(smi_cond, done, current_block); | |
+ __ SetInsertPoint(current_block); | |
+ } | |
+ | |
+ DCHECK(IsAligned(offset, kPointerSize)); | |
+ auto field_address = FieldOperand(object, offset); | 
+ field_address = __ CreateBitOrPointerCast(field_address, Types::tagged); | 
+ | |
+ if (emit_debug_code()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ RecordWrite(object, field_address, value, ptr_check, remembered_set, | 
+ OMIT_SMI_CHECK); | 
+ __ CreateBr(done); | |
+ __ SetInsertPoint(done); | |
+ | |
+ if (emit_debug_code()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::RecordWrite(llvm::Value* object, | |
+ llvm::Value* address, | |
+ llvm::Value* value, | |
+ PointersToHereCheck ptr_check, | |
+ RememberedSetAction remembered_set_action, | |
+ enum SmiCheck smi_check) { | |
+ AssertNotSmi(object); | |
+ | |
+ if (remembered_set_action == OMIT_REMEMBERED_SET && | |
+ !FLAG_incremental_marking) { | |
+ return; | |
+ } | |
+ | |
+ if (emit_debug_code()) { | |
+ // WRONG: LoadFieldOperand (FieldOperand) subtracts kHeapObject tag, | |
+ // Operand does not | |
+// Assert(Compare(value, LoadFieldOperand(key_reg, 0))); | |
+ UNIMPLEMENTED(); | |
+ } | |
+ auto stub_block = NewBlock("RecordWrite after checked page flag"); | |
+ llvm::BasicBlock* done = NewBlock("RecordWrite done"); | |
+ | |
+ if (smi_check == INLINE_SMI_CHECK) { | |
+ llvm::BasicBlock* current_block = NewBlock("RecordWrite Smi checked"); | |
+ // Skip barrier if writing a smi. | |
+ llvm::Value* smi_cond = SmiCheck(value, false); // JumpIfSmi(value, &done); | 
+ __ CreateCondBr(smi_cond, done, current_block); | |
+ __ SetInsertPoint(current_block); | |
+ } | |
+ | |
+ if (ptr_check != kPointersToHereAreAlwaysInteresting) { | |
+ auto equal = CheckPageFlag(value, | |
+ MemoryChunk::kPointersToHereAreInterestingMask); | |
+ llvm::BasicBlock* after_page_check = NewBlock("RecordWrite page check"); | |
+ __ CreateCondBr(equal, done, after_page_check); | |
+ __ SetInsertPoint(after_page_check); | |
+ } | |
+ | |
+ auto equal = CheckPageFlag(object, | |
+ MemoryChunk::kPointersFromHereAreInterestingMask); | |
+ __ CreateCondBr(equal, done, stub_block); | |
+ | |
+ __ SetInsertPoint(stub_block); | |
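+ // The following are the registers expected by the calling convention. | 
+ // They can be changed, but the CC must be adjusted accordingly. | 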
+ Register object_reg = rbx; | |
+ Register map_reg = rcx; | |
+ Register dst_reg = rdx; | |
+ RecordWriteStub stub(isolate(), object_reg, map_reg, dst_reg, | |
+ remembered_set_action, kSaveFPRegs); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap_alloc; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params = { object, value, address }; | |
+ CallCode(code, llvm::CallingConv::X86_64_V8_RWS, params, true); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(done); | |
+ | |
+ // Count number of write barriers in generated code. | |
+ isolate()->counters()->write_barriers_static()->Increment(); | |
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { | |
+ DCHECK(instr->object()->representation().IsTagged()); | |
+ DCHECK(instr->key()->representation().IsTagged()); | |
+ DCHECK(instr->value()->representation().IsTagged()); | |
+ if (instr->HasVectorAndSlot()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode( | |
+ isolate(), instr->language_mode(), | |
+ instr->initialization_state()).code(); | |
+ | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->context())); | |
+ params.push_back(Use(instr->object())); | |
+ params.push_back(Use(instr->value())); | |
+ params.push_back(Use(instr->key())); | |
+ auto result = CallCode(ic, llvm::CallingConv::X86_64_V8_S7, params); | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { | |
+ Representation representation = instr->field_representation(); | |
+ | |
+ HObjectAccess access = instr->access(); | |
+ int offset = access.offset() - 1; | |
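+ // The stores below use ConstructAddress (raw addressing) rather than | 
+ // FieldOperand, so kHeapObjectTag is dropped here; the RecordWriteField | 
+ // call at the bottom adds it back. | 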
+ | |
+ if (access.IsExternalMemory()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ AssertNotSmi(Use(instr->object())); | |
+ | |
+ if (!FLAG_unbox_double_fields && representation.IsDouble()) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ | |
+ if (instr->has_transition()) { | |
+ Handle<Map> transition = instr->transition_map(); | |
+ AddDeprecationDependency(transition); | |
+ if (!instr->NeedsWriteBarrierForMap()) { | |
+ llvm::Value* heap_transition = MoveHeapObject(transition); | |
+ llvm::Value* ptr = __ CreateIntToPtr(Use(instr->object()), Types::ptr_i8); | |
+ llvm::Value* address = FieldOperand(ptr, HeapObject::kMapOffset); | |
+ llvm::Value* casted_address = __ CreateBitCast(address, Types::ptr_tagged); | |
+ __ CreateStore(heap_transition, casted_address); | |
+ } else { | |
+ llvm::Value* scratch = MoveHeapObject(transition); | |
+ llvm::Value* obj_addr = FieldOperand(Use(instr->object()), | |
+ HeapObject::kMapOffset); | |
+ auto casted_address = __ CreateBitCast(obj_addr, Types::ptr_tagged); | |
+ __ CreateStore(scratch, casted_address); | |
+ RecordWriteForMap(Use(instr->object()), scratch); | |
+ } | |
+ } | |
+ | |
+ // Do the store. | |
+ llvm::Value* obj_arg = Use(instr->object()); | |
+ if (!access.IsInobject()) { | |
+ obj_arg = LoadFieldOperand(obj_arg, JSObject::kPropertiesOffset); | |
+ } | |
+ | |
+ if (representation.IsSmi() && SmiValuesAre32Bits() && | |
+ instr->value()->representation().IsInteger32()) { | |
+ DCHECK(instr->store_mode() == STORE_TO_INITIALIZED_ENTRY); | |
+ if (FLAG_debug_code) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ // Store int value directly to upper half of the smi. | |
+ STATIC_ASSERT(kSmiTag == 0); | |
+ DCHECK(kSmiTagSize + kSmiShiftSize == 32); | |
+ offset += kPointerSize / 2; | |
+ representation = Representation::Integer32(); | |
+ } | |
+ | |
+ | |
+ if (FLAG_unbox_double_fields && representation.IsDouble()) { | |
+ UNIMPLEMENTED(); | |
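+ // NOTE: the code below is unreachable until the UNIMPLEMENTED() above is | 
+ // removed. | 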
+ DCHECK(access.IsInobject()); | |
+ llvm::Value* obj_address = ConstructAddress(Use(instr->object()), offset); | |
+ llvm::Value* casted_obj_add = __ CreateBitCast(obj_address, | |
+ Types::ptr_float64); | |
+ llvm::Value* value = Use(instr->value()); | |
+ __ CreateStore(value, casted_obj_add); | |
+ return; | |
+ } else { | |
+ HValue* hValue = instr->value(); | |
+ if (hValue->representation().IsInteger32()) { | |
+ llvm::Value* store_address = ConstructAddress(obj_arg, offset); | |
+ llvm::Value* casted_address = __ CreateBitCast(store_address, | 
+ Types::ptr_i32); | 
+ llvm::Value* casted_value = __ CreateBitCast(Use(hValue), Types::i32); | 
+ __ CreateStore(casted_value, casted_address); | 
+ } else if (hValue->representation().IsSmi() || !hValue->IsConstant()) { | 
+ llvm::Value* store_address = ConstructAddress(obj_arg, offset); | 
+ auto pointer_type = GetLLVMType(hValue->representation())->getPointerTo(); | 
+ llvm::Value* casted_address = __ CreateBitCast(store_address, | 
+ pointer_type); | 
+ __ CreateStore(Use(hValue), casted_address); | 
+ } else { | |
+ DCHECK(hValue->IsConstant()); | |
+ { | |
+ AllowHandleAllocation allow_handles; // TODO(llvm): why do we need this? | 
+ HConstant* constant = HConstant::cast(instr->value()); | |
+ Handle<Object> handle_value = constant->handle(isolate()); | |
+ llvm::Value* store_address = ConstructAddress(obj_arg, offset); | 
+ llvm::Value* casted_address = __ CreateBitCast(store_address, | 
+ Types::ptr_tagged); | 
+ auto llvm_val = MoveHeapObject(handle_value); | 
+ __ CreateStore(llvm_val, casted_address); | 
+ } | |
+ } | |
+ } | |
+ | |
+ if (instr->NeedsWriteBarrier()) { | |
+ // FIXME(llvm): not sure this is right. | 
+ // TODO(llvm): find a way to test this case. | 
+ RecordWriteField(obj_arg, | |
+ Use(instr->value()), | |
+ offset + 1, | |
+ instr->SmiCheckForWriteBarrier(), | |
+ instr->PointersToHereCheckForValue(), | |
+ EMIT_REMEMBERED_SET); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { | |
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* object = Use(instr->object()); | |
+ llvm::Value* value = Use(instr->value()); | |
+ llvm::Value* name_reg = MoveHeapObject(instr->name()); | |
+ AllowHandleAllocation allow_handles_allocation; | |
+ Handle<Code> ic = | |
+ StoreIC::initialize_stub(isolate(), instr->language_mode(), | |
+ instr->initialization_state()); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(object); | |
+ params.push_back(value); | |
+ params.push_back(name_reg); | |
+ for (int i = pending_pushed_args_.length() - 1; i >= 0; i--) | |
+ params.push_back(pending_pushed_args_[i]); | |
+ pending_pushed_args_.Clear(); | |
+ llvm::Value* call = CallCode(ic, llvm::CallingConv::X86_64_V8_S7, params); | |
+ llvm::Value* return_val = __ CreatePtrToInt(call, Types::i64); | 
+ instr->set_llvm_value(return_val); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStringAdd(HStringAdd* instr) { | |
+ StringAddStub stub(isolate(), | |
+ instr->flags(), | |
+ instr->pretenure_flag()); | |
+ | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->context())); | |
+ for (int i = 1; i < instr->OperandCount(); ++i) | 
+ params.push_back(Use(instr->OperandAt(i))); | |
+ llvm::Value* call = CallCode(code, llvm::CallingConv::X86_64_V8_S10, params); | |
+ instr->set_llvm_value(call); | |
+} | |
+ | |
+// TODO(llvm): this is a horrible function. | |
+// At least do something about this mess with types. | |
+// And test it thoroughly... | |
+void LLVMChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { | |
+ // TODO(llvm): find scripts to test the other paths. | 
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::Value* str = Use(instr->string()); | |
+ llvm::Value* index = Use(instr->index()); | |
+ llvm::BasicBlock* deferred = NewBlock("StringCharCodeAt Deferred"); | |
+ llvm::BasicBlock* set_value = NewBlock("StringCharCodeAt End"); | |
+ llvm::Value* map_offset = LoadFieldOperand(str, HeapObject::kMapOffset); | |
+ llvm::Value* instance_type = LoadFieldOperand(map_offset, | |
+ Map::kInstanceTypeOffset); | |
+ llvm::Value* casted_instance_type = __ CreatePtrToInt(instance_type, | |
+ Types::i64); | |
+ // movzxbl: keep only the low byte, the instance type. | 
+ llvm::Value* result_type = __ CreateAnd(casted_instance_type, | |
+ __ getInt64(0x000000ff)); | |
+ llvm::BasicBlock* check_sequential = NewBlock("StringCharCodeAt" | 
+ " CheckSequential"); | 
+ llvm::BasicBlock* check_seq_cont = NewBlock("StringCharCodeAt" | 
+ " CheckSequential Cont"); | 
+ llvm::Value* and_IndirectStringMask = __ CreateAnd(result_type, | |
+ __ getInt64(kIsIndirectStringMask)); | |
+ llvm::Value* cmp_IndirectStringMask = __ CreateICmpEQ(and_IndirectStringMask, | |
+ __ getInt64(0)); | |
+ __ CreateCondBr(cmp_IndirectStringMask, check_sequential, check_seq_cont); | 
+ | |
+ __ SetInsertPoint(check_seq_cont); | |
+ llvm::BasicBlock* cons_str = NewBlock("StringCharCodeAt IsConsString"); | |
+ llvm::BasicBlock* cons_str_cont = NewBlock("StringCharCodeAt NotConsString"); | |
+ llvm::Value* and_NotConsMask = __ CreateAnd(result_type, | |
+ __ getInt64(kSlicedNotConsMask)); | |
+ llvm::Value* cmp_NotConsMask = __ CreateICmpEQ(and_NotConsMask, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_NotConsMask, cons_str, cons_str_cont); | |
+ | |
+ __ SetInsertPoint(cons_str_cont); | |
+ llvm::BasicBlock* indirect_string_loaded = NewBlock("StringCharCodeAt Indirect String"); | |
+ llvm::Value* address = LoadFieldOperand(str, SlicedString::kOffsetOffset + kSmiShift / kBitsPerByte); | |
+ llvm::Value* casted_address = __ CreatePtrToInt(address, Types::i32); | |
+ | |
+ // TODO(llvm): do we need ptr_i32 here? | 
+ llvm::Value* cons_index = __ CreateAdd(index, casted_address); | |
+ llvm::Value* cons_string = LoadFieldOperand(str, SlicedString::kParentOffset); | |
+ __ CreateBr(indirect_string_loaded); | |
+ | |
+ __ SetInsertPoint(cons_str); | |
+ llvm::BasicBlock* cmp_root_cont = NewBlock("StringCharCodeAt" | |
+ " ConsStr CompareRoot Cont"); | |
+ llvm::Value* string_second_offset = LoadFieldOperand(str, | |
+ ConsString::kSecondOffset); | |
+ llvm::Value* cmp_root = CompareRoot(string_second_offset, | |
+ Heap::kempty_stringRootIndex); | |
+ __ CreateCondBr(cmp_root, cmp_root_cont, deferred); | |
+ | |
+ __ SetInsertPoint(cmp_root_cont); | |
+ llvm::Value* after_cmp_root_str = LoadFieldOperand(str, | |
+ ConsString::kFirstOffset); | |
+ __ CreateBr(indirect_string_loaded); | |
+ | |
+ __ SetInsertPoint(indirect_string_loaded); | |
+ llvm::PHINode* phi_string = __ CreatePHI(Types::tagged, 2); | |
+ phi_string->addIncoming(after_cmp_root_str, cmp_root_cont); | |
+ phi_string->addIncoming(cons_string, cons_str_cont); | |
+ | |
+ llvm::PHINode* index_indirect = __ CreatePHI(Types::i32, 2); | |
+ index_indirect->addIncoming(cons_index, cons_str_cont); | |
+ index_indirect->addIncoming(index, cmp_root_cont); | |
+ | |
+ llvm::Value* indirect_map = LoadFieldOperand(phi_string, | |
+ HeapObject::kMapOffset); | |
+ llvm::Value* indirect_instance = LoadFieldOperand(indirect_map, | |
+ Map::kInstanceTypeOffset); | |
+ indirect_instance = __ CreateBitOrPointerCast(indirect_instance, Types::i64); | |
+ llvm::Value* indirect_result_type = __ CreateAnd(indirect_instance, | |
+ __ getInt64(0x000000ff)); | |
+ __ CreateBr(check_sequential); | 
+ | 
+ __ SetInsertPoint(check_sequential); | 
+ STATIC_ASSERT(kSeqStringTag == 0); | |
+ llvm::BasicBlock* seq_string = NewBlock("StringCharCodeAt SeqString"); | |
+ llvm::BasicBlock* cont_inside_seq = NewBlock("StringCharCodeAt SeqString cont"); | |
+ llvm::PHINode* phi_result_type = __ CreatePHI(Types::i64, 2); | |
+ phi_result_type->addIncoming(indirect_result_type, indirect_string_loaded); | |
+ phi_result_type->addIncoming(result_type, insert); | |
+ | |
+ llvm::PHINode* phi_index = __ CreatePHI(Types::i32, 2); | |
+ phi_index->addIncoming(index_indirect, indirect_string_loaded); | |
+ phi_index->addIncoming(index, insert); | |
+ | |
+ llvm::PHINode* phi_str = __ CreatePHI(Types::tagged, 2); | |
+ phi_str->addIncoming(str, insert); | |
+ phi_str->addIncoming(phi_string, indirect_string_loaded); | |
+ | |
+ llvm::Value* and_representation = __ CreateAnd(phi_result_type, | |
+ __ getInt64(kStringRepresentationMask)); | |
+ llvm::Value* cmp_representation = __ CreateICmpEQ(and_representation, | |
+ __ getInt64(0)); | |
+ __ CreateCondBr(cmp_representation, seq_string, cont_inside_seq); | |
+ | |
+ __ SetInsertPoint(cont_inside_seq); | |
+ llvm::BasicBlock* extern_string = NewBlock("StringCharCodeAt" | 
+ " CheckShortExternalString"); | 
+ if (FLAG_debug_code) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+ STATIC_ASSERT(kShortExternalStringTag != 0); | |
+ llvm::Value* and_short_tag = __ CreateAnd(phi_result_type, | |
+ __ getInt64(kShortExternalStringTag)); | |
+ llvm::Value* cmp_short_tag = __ CreateICmpNE(and_short_tag, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_short_tag, deferred, extern_string); | |
+ | |
+ __ SetInsertPoint(extern_string); | |
+ STATIC_ASSERT(kTwoByteStringTag == 0); | |
+ llvm::BasicBlock* one_byte_external = NewBlock("StringCharCodeAt" | |
+ " OneByteExternal"); | |
+ llvm::BasicBlock* two_byte_external = NewBlock("StringCharCodeAt" | 
+ " TwoByteExternal"); | 
+ | |
+ llvm::Value* and_encoding_mask = __ CreateAnd(phi_result_type, | |
+ __ getInt64(kStringEncodingMask)); | |
+ llvm::Value* not_encoding_mask = __ CreateICmpNE(and_encoding_mask, | |
+ __ getInt64(0)); | |
+ llvm::Value* external_string = LoadFieldOperand(phi_str, | |
+ ExternalString::kResourceDataOffset); | |
+ __ CreateCondBr(not_encoding_mask, one_byte_external, two_byte_external); | |
+ | |
+ __ SetInsertPoint(two_byte_external); | |
+ llvm::BasicBlock* done = NewBlock("StringCharCodeAt Done"); | |
+ llvm::Value* two_byte_offset = __ CreateMul(phi_index, __ getInt32(2)); | |
+ llvm::Value* base_casted_two_ext = __ CreateBitOrPointerCast(external_string, | |
+ Types::ptr_i8); | |
+ llvm::Value* two_byte_address = __ CreateGEP(base_casted_two_ext, | |
+ two_byte_offset); | |
+ llvm::Value* casted_addr_two_ext = __ CreatePointerCast(two_byte_address, | |
+ Types::ptr_tagged); | |
+ llvm::Value* two_byte_ex_load = __ CreateLoad(casted_addr_two_ext); | |
+ two_byte_ex_load = __ CreateBitOrPointerCast(two_byte_ex_load, Types::i64); | |
+ llvm::Value* two_byte_external_result = __ CreateAnd(two_byte_ex_load, | |
+ __ getInt64(0x0000ffff)); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(one_byte_external); | |
+ llvm::Value* one_byte_offset = __ CreateAdd(phi_index, | |
+ __ getInt32(kHeapObjectTag)); | |
+ llvm::Value* base_casted_one_ext = __ CreateIntToPtr(external_string, | |
+ Types::ptr_i8); | |
+ llvm::Value* one_byte_addr_ext = __ CreateGEP(base_casted_one_ext, | |
+ one_byte_offset); | |
+ llvm::Value* casted_addr_one_ext = __ CreatePointerCast(one_byte_addr_ext, | |
+ Types::ptr_tagged); | |
+ llvm::Value* add_result_one = __ CreateLoad(casted_addr_one_ext); | |
+ add_result_one = __ CreateBitOrPointerCast(add_result_one, Types::i64); | |
+ llvm::Value* one_byte_external_result = __ CreateAnd(add_result_one, | |
+ __ getInt64(0x000000ff)); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(seq_string); | |
+ llvm::BasicBlock* one_byte = NewBlock("StringCharCodeAt OneByte"); | |
+ llvm::BasicBlock* two_byte = NewBlock("StringCharCodeAt TwoByte"); | |
+ STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | |
+ STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | |
+ llvm::Value* base_string = __ CreatePtrToInt(phi_str, Types::i64); | |
+ llvm::Value* phi_index64 = __ CreateIntCast(phi_index, Types::i64, true); | |
+ llvm::Value* and_seq_str = __ CreateAnd(phi_result_type, | |
+ __ getInt64(kStringEncodingMask)); | |
+ llvm::Value* seq_not_zero = __ CreateICmpNE(and_seq_str, __ getInt64(0)); | |
+ __ CreateCondBr(seq_not_zero, one_byte, two_byte); | |
+ | |
+ __ SetInsertPoint(two_byte); | |
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | |
+ llvm::Value* two_byte_index = __ CreateMul(phi_index64, __ getInt64(2)); | |
+ llvm::Value* two_byte_add_index = __ CreateAdd(two_byte_index, | |
+ __ getInt64(SeqTwoByteString::kHeaderSize - 1)); | 
+ llvm::Value* address_two = __ CreateAdd(base_string, two_byte_add_index); | |
+ llvm::Value* casted_adds_two = __ CreateIntToPtr(address_two, Types::ptr_i64); | |
+ llvm::Value* two_byte_load = __ CreateLoad(casted_adds_two); | |
+ llvm::Value* two_byte_result = __ CreateAnd(two_byte_load, | |
+ __ getInt64(0x0000ffff)); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(one_byte); | |
+ llvm::Value* one_byte_add_index = __ CreateAdd(phi_index64, | |
+ __ getInt64(SeqTwoByteString::kHeaderSize - 1)); | |
+ llvm::Value* address_one = __ CreateAdd(base_string, one_byte_add_index); | |
+ llvm::Value* casted_adds_one = __ CreateIntToPtr(address_one, Types::ptr_i64); | |
+ | |
+ llvm::Value* one_byte_load = __ CreateLoad(casted_adds_one); | |
+ llvm::Value* one_result = __ CreateIntCast(one_byte_load, Types::i64, true); | |
+ llvm::Value* one_byte_result = __ CreateAnd(one_result, __ getInt64(0x000000ff)); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(done); | |
+ llvm::PHINode* result_gen = __ CreatePHI(Types::i64, 4); | |
+ result_gen->addIncoming(one_byte_external_result, one_byte_external); | |
+ result_gen->addIncoming(two_byte_external_result, two_byte_external); | |
+ result_gen->addIncoming(one_byte_result, one_byte); | |
+ result_gen->addIncoming(two_byte_result, two_byte); | |
+ __ CreateBr(set_value); | |
+ | |
+ __ SetInsertPoint(deferred); | |
+ llvm::Value* str_def = Use(instr->string()); | |
+ | |
+ std::vector<llvm::Value*> params; | |
+ // TODO(llvm): implement the constant index case. | 
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue); | |
+ if (instr->index()->IsConstant()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ llvm::Value* smi_index = Integer32ToSmi(instr->index()); | 
+ params.push_back(smi_index); | 
+ } | |
+ params.push_back(str_def); | |
+ llvm::Value* call = CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, | |
+ Use(instr->context()), | |
+ params); | |
+ llvm::Value* call_casted = __ CreatePtrToInt(call, Types::i64); | |
+ __ CreateBr(set_value); | |
+ | |
+ __ SetInsertPoint(set_value); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i64, 2); | |
+ phi->addIncoming(result_gen, done); | |
+ phi->addIncoming(call_casted, deferred); | |
+ auto result = __ CreateTruncOrBitCast(phi, Types::i32); | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { | |
+ // TODO(llvm): implement the fast (non-runtime) case. | 
+ std::vector<llvm::Value*> args; | |
+ llvm::Value* arg1 = Integer32ToSmi(instr->value()); | |
+ args.push_back(arg1); | |
+ llvm::Value* result = CallRuntimeFromDeferred(Runtime::kCharFromCode, Use(instr->context()), args); | |
+ instr->set_llvm_value(result); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStringCompareAndBranch(HStringCompareAndBranch* instr) { | |
+ // TODO(llvm): not tested (string-validate-input.js in doTest). | 
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* left = Use(instr->left()); | |
+ llvm::Value* right = Use(instr->right()); | |
+ Token::Value op = instr->token(); | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap; | |
+ Handle<Code> ic = CodeFactory::StringCompare(isolate()).code(); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(left); | |
+ params.push_back(right); | |
+ llvm::Value* result = CallCode(ic, llvm::CallingConv::X86_64_V8_S10, params); | |
+ llvm::Value* return_val = __ CreatePtrToInt(result, Types::i64); | 
+ // The x64 codegen does testp(rax, rax); and-ing a value with itself is a | 
+ // no-op in LLVM IR, so compare the return value with zero directly. | 
+ llvm::CmpInst::Predicate pred = TokenToPredicate(op, false, false); | 
+ llvm::Value* cmp = __ CreateICmp(pred, return_val, __ getInt64(0)); | 
+ llvm::BranchInst* branch = __ CreateCondBr(cmp, Use(instr->SuccessorAt(0)), | |
+ Use(instr->SuccessorAt(1))); | |
+ instr->set_llvm_value(branch); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoSub(HSub* instr) { | |
+ if (instr->representation().IsInteger32() || instr->representation().IsSmi()) { | 
+ DCHECK(instr->left()->representation().Equals(instr->representation())); | |
+ DCHECK(instr->right()->representation().Equals(instr->representation())); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ if (!instr->CheckFlag(HValue::kCanOverflow)) { | |
+ llvm::Value* sub = __ CreateSub(Use(left), Use(right), ""); | |
+ instr->set_llvm_value(sub); | |
+ } else { | |
+ auto type = instr->representation().IsSmi() ? Types::i64 : Types::i32; | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration( | |
+ module_.get(), llvm::Intrinsic::ssub_with_overflow, type); | |
+ llvm::Value* params[] = { Use(left), Use(right) }; | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ llvm::Value* sub = __ CreateExtractValue(call, 0); | |
+ llvm::Value* overflow = __ CreateExtractValue(call, 1); | |
+ DeoptimizeIf(overflow, Deoptimizer::kOverflow); | |
+ instr->set_llvm_value(sub); | |
+ } | |
+ } else if (instr->representation().IsDouble()) { | |
+ DCHECK(instr->left()->representation().IsDouble()); | |
+ DCHECK(instr->right()->representation().IsDouble()); | |
+ HValue* left = instr->left(); | |
+ HValue* right = instr->right(); | |
+ llvm::Value* fSub = __ CreateFSub(Use(left), Use(right), ""); | |
+ instr->set_llvm_value(fSub); | |
+ } else if (instr->representation().IsTagged()) { | 
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* left = Use(instr->left()); | |
+ llvm::Value* right = Use(instr->right()); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(left); | |
+ params.push_back(right); | |
+ AllowHandleAllocation allow_handles; | |
+ Handle<Code> code = | |
+ CodeFactory::BinaryOpIC(isolate(), Token::SUB, | |
+ instr->strength()).code(); | |
+ llvm::Value* sub = CallCode(code, llvm::CallingConv::X86_64_V8_S10, params); | |
+ instr->set_llvm_value(sub); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoThisFunction(HThisFunction* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoToFastProperties(HToFastProperties* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoTransitionElementsKind( | |
+ HTransitionElementsKind* instr) { | |
+ DCHECK(instr->HasNoUses()); | |
+ auto object = Use(instr->object()); | |
+ Handle<Map> from_map = instr->original_map().handle(); | |
+ Handle<Map> to_map = instr->transitioned_map().handle(); | |
+ ElementsKind from_kind = instr->from_kind(); | |
+ ElementsKind to_kind = instr->to_kind(); | |
+ | |
+ llvm::BasicBlock* end = NewBlock("TransitionElementsKind end"); | |
+ llvm::BasicBlock* cont = NewBlock("TransitionElementsKind meat"); | |
+ | |
+ auto comp = Compare(LoadFieldOperand(object, HeapObject::kMapOffset), | |
+ from_map); | |
+ __ CreateCondBr(comp, cont, end); | |
+ __ SetInsertPoint(cont); | |
+ | |
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) { | |
+ // map is a tagged value. | |
+ auto new_map = Move(to_map, RelocInfo::EMBEDDED_OBJECT); | |
+ auto store_addr = FieldOperand(object, HeapObject::kMapOffset); | |
+ auto casted_store_addr = __ CreateBitCast(store_addr, Types::ptr_tagged); | |
+ __ CreateStore(new_map, casted_store_addr); | |
+ // Write barrier. TODO(llvm): give llvm.gcwrite and company a thought. | |
+ RecordWriteForMap(object, new_map); | |
+ __ CreateBr(end); | |
+ } else { | |
+ AllowHeapAllocation allow_heap; | |
+ bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; | |
+ llvm::Value* map = MoveHeapObject(to_map); | |
+ TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(object); | |
+ params.push_back(map); | |
+ params.push_back(GetContext()); | |
+ AllowHandleAllocation allow_handles; | |
+ CallCode(stub.GetCode(), llvm::CallingConv::X86_64_V8_CES, params); | |
+ // TODO(llvm): RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); | 
+ __ CreateBr(end); | |
+ } | |
+ __ SetInsertPoint(end); | |
+} | |
+ | |
+void LLVMChunkBuilder::RecordWriteForMap(llvm::Value* object, | |
+ llvm::Value* map) { | |
+ AssertNotSmi(object); | |
+ | |
+ if (emit_debug_code()) { | |
+ auto maps_equal = CompareMap(map, isolate()->factory()->meta_map()); | |
+ Assert(maps_equal); | |
+ } | |
+ | |
+ if (!FLAG_incremental_marking) { | |
+ return; | |
+ } | |
+ | |
+ if (emit_debug_code()) { | |
+ // FIXME(llvm): maybe we should dereference the FieldOperand | |
+ Assert(Compare(map, LoadFieldOperand(object, HeapObject::kMapOffset))); | |
+ } | |
+ | |
+ auto map_address = FieldOperand(object, HeapObject::kMapOffset); // dst | |
+ map_address = __ CreateBitOrPointerCast(map_address, Types::tagged); | |
+ | |
+ auto equal = CheckPageFlag(map, | |
+ MemoryChunk::kPointersToHereAreInterestingMask); | |
+ | |
+ auto cont = NewBlock("CheckPageFlag OK"); | |
+ auto call_stub = NewBlock("Call RecordWriteStub"); | |
+ __ CreateCondBr(equal, cont, call_stub); | |
+ | |
+ __ SetInsertPoint(call_stub); | |
+ // The following are the registers expected by the calling convention. | |
+ // They can be changed, but the CC must be adjusted accordingly. | |
+ Register object_reg = rbx; | |
+ Register map_reg = rcx; | |
+ Register dst_reg = rdx; | |
+ RecordWriteStub stub(isolate(), object_reg, map_reg, dst_reg, | |
+ OMIT_REMEMBERED_SET, kDontSaveFPRegs); | |
+ Handle<Code> code = Handle<Code>::null(); | |
+ { | |
+ AllowHandleAllocation allow_handles; | |
+ AllowHeapAllocation allow_heap_alloc; | |
+ code = stub.GetCode(); | |
+ // FIXME(llvm,gc): respect reloc info mode... | |
+ } | |
+ std::vector<llvm::Value*> params = { object, map, map_address }; | |
+ CallCode(code, llvm::CallingConv::X86_64_V8_RWS, params, false); | |
+ __ CreateBr(cont); | |
+ | |
+ __ SetInsertPoint(cont); | |
+ | |
+ // Count number of write barriers in generated code. | |
+ isolate()->counters()->write_barriers_static()->Increment(); | |
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1); | |
+} | |
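The CheckPageFlag fast path above leans on V8's heap layout: every page begins with a MemoryChunk header, reachable from any object address by masking down to the page boundary, and the barrier stub is only called when the relevant flag bit is set. A minimal standalone sketch of that test follows; the alignment and flags offset are illustrative stand-ins, not the values from V8's spaces.h.

#include <cstdint>

// Illustrative constants: V8's real page size and flags offset live in
// src/heap/spaces.h, not in this patch.
constexpr uintptr_t kPageAlignment = uintptr_t{1} << 20;
constexpr uintptr_t kFlagsOffset = 0;

bool PageFlagIsSet(const void* object, uintptr_t mask) {
  // Mask the address down to the page start to reach the chunk header,
  // then test the requested flag bits.
  uintptr_t addr = reinterpret_cast<uintptr_t>(object);
  uintptr_t chunk = addr & ~(kPageAlignment - 1);
  uintptr_t flags = *reinterpret_cast<const uintptr_t*>(chunk + kFlagsOffset);
  return (flags & mask) != 0;
}

When kPointersToHereAreInterestingMask is clear, the conditional branch above skips the RecordWriteStub call entirely, which is the common case.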
+ | |
+void LLVMChunkBuilder::DoTrapAllocationMemento(HTrapAllocationMemento* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoTypeof(HTypeof* instr) { | |
+ llvm::Value* context = Use(instr->context()); | |
+ llvm::Value* value = Use(instr->value()); | |
+ llvm::BasicBlock* do_call = NewBlock("DoTypeof Call Stub"); | |
+ llvm::BasicBlock* no_call = NewBlock("DoTypeof Fast"); | |
+ llvm::BasicBlock* end = NewBlock("DoTypeof Merge"); | |
+ llvm::Value* not_smi = SmiCheck(value, true); | |
+ __ CreateCondBr(not_smi, do_call, no_call); | |
+ | |
+ __ SetInsertPoint(no_call); | |
+ Factory* factory = isolate()->factory(); | |
+ Handle<String> string = factory->number_string(); | |
+ llvm::Value* val = MoveHeapObject(string); | |
+ __ CreateBr(end); | |
+ | |
+ __ SetInsertPoint(do_call); | |
+ | |
+ AllowHandleAllocation allow_handles; | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(context); | |
+ params.push_back(value); | |
+ TypeofStub stub(isolate()); | |
+ Handle<Code> code = stub.GetCode(); | |
+ llvm::Value* call = CallCode(code, llvm::CallingConv::X86_64_V8_S8, params); | |
+ __ CreateBr(end); | |
+ __ SetInsertPoint(end); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(val, no_call); | |
+ phi->addIncoming(call, do_call); | |
+ instr->set_llvm_value(phi); | |
+} | |
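DoTypeof is a typical instance of the diamond-plus-phi shape this builder emits throughout: a fast path and a slow path split off one block, and a PHI in the merge block selects the result by predecessor. A self-contained sketch of the same shape against the plain LLVM C++ API; nothing below is LLV8-specific, and all names are illustrative.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

llvm::Function* BuildDiamond(llvm::Module* module) {
  llvm::LLVMContext& ctx = module->getContext();
  llvm::IRBuilder<> b(ctx);
  llvm::Type* i64 = b.getInt64Ty();
  auto* fty = llvm::FunctionType::get(i64, {i64}, false);
  auto* f = llvm::Function::Create(fty, llvm::Function::ExternalLinkage,
                                   "diamond", module);
  auto* entry = llvm::BasicBlock::Create(ctx, "entry", f);
  auto* fast = llvm::BasicBlock::Create(ctx, "fast", f);
  auto* slow = llvm::BasicBlock::Create(ctx, "slow", f);
  auto* merge = llvm::BasicBlock::Create(ctx, "merge", f);

  b.SetInsertPoint(entry);
  llvm::Value* arg = &*f->arg_begin();
  b.CreateCondBr(b.CreateICmpEQ(arg, b.getInt64(0)), fast, slow);

  b.SetInsertPoint(fast);                  // fast path: constant result
  llvm::Value* fast_val = b.getInt64(1);
  b.CreateBr(merge);

  b.SetInsertPoint(slow);                  // slow path: computed result
  llvm::Value* slow_val = b.CreateAdd(arg, b.getInt64(2));
  b.CreateBr(merge);

  b.SetInsertPoint(merge);                 // select the value by predecessor
  llvm::PHINode* phi = b.CreatePHI(i64, 2);
  phi->addIncoming(fast_val, fast);
  phi->addIncoming(slow_val, slow);
  b.CreateRet(phi);
  return f;
}

The invariant to keep in mind is the one the NormalizePhis() comment in llvm-chunk.h states: every block passed to addIncoming must be an immediate predecessor of the phi's block.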
+ | |
+void LLVMChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) { | |
+ llvm::Value* input = Use(instr->value()); | |
+ Factory* factory = isolate()->factory(); | |
+ llvm::BasicBlock* not_smi = NewBlock("DoTypeofIsAndBranch NotSmi"); | |
+ Handle<String> type_name = instr->type_literal(); | |
+ if (String::Equals(type_name, factory->number_string())) { | |
+ llvm::Value* smi_cond = SmiCheck(input); | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), not_smi); | |
+ __ SetInsertPoint(not_smi); | |
+ | |
+ llvm::Value* root = LoadFieldOperand(input, HeapObject::kMapOffset); | |
+ llvm::Value* cmp_root = CompareRoot(root, Heap::kHeapNumberMapRootIndex); | |
+ __ CreateCondBr(cmp_root, Use(instr->SuccessorAt(0)), | |
+ Use(instr->SuccessorAt(1))); | |
+ } else if (String::Equals(type_name, factory->string_string())) { | |
+ UNIMPLEMENTED(); | |
+    // TODO(llvm): find a test that exercises this path. | 
+ llvm::Value* smi_cond = SmiCheck(input); | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), not_smi); | |
+ | |
+ __ SetInsertPoint(not_smi); | |
+ llvm::Value* map = LoadFieldOperand(input, HeapObject::kMapOffset); | |
+ auto imm = static_cast<int8_t>(FIRST_NONSTRING_TYPE); | |
+ llvm::Value* type_offset = LoadFieldOperand(map, Map::kInstanceTypeOffset); | |
+ llvm::Value* cond = __ CreateICmpUGE(type_offset, __ getInt64(imm)); | |
+ __ CreateCondBr(cond, Use(instr->SuccessorAt(0)), | |
+ Use(instr->SuccessorAt(1))); | |
+ } else if (String::Equals(type_name, factory->symbol_string())) { | |
+ UNIMPLEMENTED(); | |
+ } else if (String::Equals(type_name, factory->boolean_string())) { | |
+ UNIMPLEMENTED(); | |
+ } else if (String::Equals(type_name, factory->undefined_string())) { | |
+    // TODO(llvm): not tested. | 
+ llvm::BasicBlock* after_cmp_root = NewBlock("DoTypeofIsAndBranch " | |
+ "after compare root"); | |
+ llvm::Value* cmp_root = CompareRoot(input, Heap::kUndefinedValueRootIndex); | |
+ __ CreateCondBr(cmp_root, Use(instr->SuccessorAt(0)), after_cmp_root); | |
+ | |
+ __ SetInsertPoint(after_cmp_root); | |
+ llvm::BasicBlock* after_check_smi = NewBlock("DoTypeofIsAndBranch " | |
+ "after check smi"); | |
+ llvm::Value* smi_cond = SmiCheck(input, false); | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), after_check_smi); | |
+ | |
+ __ SetInsertPoint(after_check_smi); | |
+ llvm::Value* map_offset = LoadFieldOperand(input, HeapObject::kMapOffset); | |
+ llvm::Value* is_undetectable = __ getInt64(1 << Map::kIsUndetectable); | |
+ llvm::Value* result = LoadFieldOperand(map_offset, Map::kBitFieldOffset); | |
+ llvm::Value* test = __ CreateAnd(result, is_undetectable); | |
+ llvm::Value* cmp = __ CreateICmpNE(test, __ getInt64(0)); | |
+ __ CreateCondBr(cmp, Use(instr->SuccessorAt(0)), Use(instr->SuccessorAt(1))); | |
+ } else if (String::Equals(type_name, factory->function_string())) { | |
+ llvm::Value* smi_cond = SmiCheck(input); | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), not_smi); | |
+ | |
+ __ SetInsertPoint(not_smi); | |
+ llvm::Value* map_offset = LoadFieldOperand(input, HeapObject::kMapOffset); | |
+ llvm::Value* bit_field = LoadFieldOperand(map_offset, Map::kBitFieldOffset); | |
+    llvm::Value* input_changed = | 
+        __ CreateAnd(bit_field, __ getInt64(0x000000ff)); // movzxbl | 
+    llvm::Value* imm = __ getInt64((1 << Map::kIsCallable) | | 
+                                   (1 << Map::kIsUndetectable)); | 
+    llvm::Value* result = __ CreateAnd(input_changed, imm); | 
+ llvm::Value* cmp = __ CreateICmpEQ(result, | |
+ __ getInt64(1 << Map::kIsCallable)); | |
+ __ CreateCondBr(cmp, Use(instr->SuccessorAt(0)), | |
+ Use(instr->SuccessorAt(1))); | |
+ } else if (String::Equals(type_name, factory->object_string())) { | |
+ llvm::Value* smi_cond = SmiCheck(input); | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), not_smi); | |
+ __ SetInsertPoint(not_smi); | |
+ llvm::BasicBlock* after_cmp_root = NewBlock("DoTypeofIsAndBranch " | |
+ "after compare root"); | |
+ llvm::Value* cmp_root = CompareRoot(input, Heap::kNullValueRootIndex); | |
+ __ CreateCondBr(cmp_root, Use(instr->SuccessorAt(0)), after_cmp_root); | |
+ | |
+ __ SetInsertPoint(after_cmp_root); | |
+ llvm::BasicBlock* after_cmp_type = NewBlock("DoTypeofIsAndBranch " | |
+                                                "after compare type"); | 
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | |
+ llvm::Value* map = LoadFieldOperand(input, HeapObject::kMapOffset); | |
+ llvm::Value* result = LoadFieldOperand(map, Map::kInstanceTypeOffset); | |
+ llvm::Value* type = __ getInt64(static_cast<int>(FIRST_SPEC_OBJECT_TYPE)); | |
+ llvm::Value* cmp = __ CreateICmpULT(result, type); | |
+ __ CreateCondBr(cmp, Use(instr->SuccessorAt(1)), after_cmp_type); | |
+ | |
+ __ SetInsertPoint(after_cmp_type); | |
+ llvm::Value* bit_field = LoadFieldOperand(map, Map::kBitFieldOffset); | |
+ llvm::Value* imm = __ getInt64((1 << Map::kIsCallable) | | |
+ (1 << Map::kIsUndetectable)); | |
+ llvm::Value* test = __ CreateAnd(bit_field, imm); | |
+ llvm::Value* cmp_result = __ CreateICmpNE(test, __ getInt64(0)); | |
+ __ CreateCondBr(cmp_result, Use(instr->SuccessorAt(0)), | |
+ Use(instr->SuccessorAt(1))); | |
+ | |
+ // clang-format off | |
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \ | |
+ } else if (String::Equals(type_name, factory->type##_string())) { \ | |
+ llvm::Value* smi_cond = SmiCheck(input); \ | |
+ __ CreateCondBr(smi_cond, Use(instr->SuccessorAt(1)), not_smi); \ | |
+ __ SetInsertPoint(not_smi); \ | |
+ llvm::Value* value = LoadFieldOperand(input, \ | |
+ HeapObject::kMapOffset); \ | |
+ llvm::Value* cmp_root = CompareRoot(value, \ | |
+ Heap::k##Type##MapRootIndex); \ | |
+ __ CreateCondBr(cmp_root, Use(instr->SuccessorAt(0)), \ | |
+ Use(instr->SuccessorAt(1))); \ | |
+ | |
+ SIMD128_TYPES(SIMD128_TYPE) | |
+#undef SIMD128_TYPE | |
+ // clang-format on | |
+ | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoIntegerMathAbs(HUnaryMathOperation* instr) { | |
+ llvm::BasicBlock* is_negative = NewBlock("ABS INTEGER CANDIDATE IS NEGATIVE"); | |
+ llvm::BasicBlock* is_positive = NewBlock("ABS INTEGER CANDIDATE IS POSITIVE"); | |
+ | |
+  llvm::BasicBlock* insert_block = __ GetInsertBlock(); | 
+  llvm::Value* zero = __ getInt32(0); | 
+  llvm::Value* cmp = __ CreateICmpSLT(Use(instr->value()), zero); | 
+  __ CreateCondBr(cmp, is_negative, is_positive); | 
+ __ SetInsertPoint(is_negative); | |
+ llvm::Value* neg_val = __ CreateNeg(Use(instr->value())); | |
+ llvm::Value* is_neg = __ CreateICmpSLT(neg_val, zero); | |
+ DeoptimizeIf(is_neg, Deoptimizer::kOverflow, false, is_positive); | |
+ __ SetInsertPoint(is_positive); | |
+ llvm::Value* val = Use(instr->value()); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i32, 2); | |
+ phi->addIncoming(neg_val, is_negative); | |
+  phi->addIncoming(val, insert_block); | 
+ instr->set_llvm_value(phi); | |
+} | |
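The is_neg deopt above is the overflow check for integer abs: in two's complement, kMinInt is the one value whose negation wraps back to itself, so "still negative after negating" means exactly "overflowed". A standalone illustration, using unsigned arithmetic so the wraparound stays well-defined C++:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t v = INT32_MIN;  // -2147483648 has no int32 positive counterpart
  int32_t negated = static_cast<int32_t>(0u - static_cast<uint32_t>(v));
  std::printf("%d\n", negated);   // prints -2147483648: the negation wrapped
  return (negated < 0) ? 0 : 1;   // "< 0 after negation" is the deopt condition
}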
+ | |
+void LLVMChunkBuilder::DoSmiMathAbs(HUnaryMathOperation* instr) { | |
+ llvm::BasicBlock* is_negative = NewBlock("ABS SMI CANDIDATE IS NEGATIVE"); | |
+ llvm::BasicBlock* is_positive = NewBlock("ABS SMI CANDIDATE IS POSITIVE"); | |
+ | |
+ llvm::BasicBlock* insert_block = __ GetInsertBlock(); | |
+ llvm::Value* value = Use(instr->value()); | |
+ llvm::Value* cmp = __ CreateICmpSLT(Use(instr->value()), __ getInt64(0)); | |
+ __ CreateCondBr(cmp, is_negative, is_positive); | |
+ __ SetInsertPoint(is_negative); | |
+ llvm::Value* neg_val = __ CreateNeg(Use(instr->value())); | |
+ llvm::Value* is_neg = __ CreateICmpSLT(neg_val, __ getInt64(0)); | |
+ DeoptimizeIf(is_neg, Deoptimizer::kOverflow, false, is_positive); | |
+ __ SetInsertPoint(is_positive); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::smi, 2); | |
+ phi->addIncoming(neg_val, is_negative); | |
+ phi->addIncoming(value, insert_block); | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { | |
+ Representation r = instr->representation(); | |
+ if (r.IsDouble()) { | |
+ llvm::Function* fabs_intrinsic = llvm::Intrinsic:: | |
+ getDeclaration(module_.get(), llvm::Intrinsic::fabs, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->value())); | |
+ llvm::Value* f_abs = __ CreateCall(fabs_intrinsic, params); | |
+ instr->set_llvm_value(f_abs); | |
+ } else if (r.IsInteger32()) { | |
+ DoIntegerMathAbs(instr); | |
+ } else if (r.IsSmi()) { | |
+ DoSmiMathAbs(instr); | |
+ } else { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { | |
+  // TODO(llvm): add checks for -Infinity and Infinity inputs. | 
+ llvm::Value* input_ = Use(instr->value()); | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::sqrt, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(input_); | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ instr->set_llvm_value(call); | |
+} | 
+ | |
+void LLVMChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) { | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::sqrt, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->value())); | |
+ llvm::Value* sqrt = __ CreateCall(intrinsic, params); | |
+ instr->set_llvm_value(sqrt); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { | |
+ llvm::Value* llvm_double_one_half = llvm::ConstantFP::get(Types::float64, 0.5); | |
+ llvm::Value* llvm_double_minus_one_half = llvm::ConstantFP::get(Types::float64, -0.5); | |
+ llvm::Value* input_reg = Use(instr->value()); | |
+ llvm::Value* input_temp = nullptr; | |
+ llvm::Value* xmm_scratch = nullptr; | |
+ llvm::BasicBlock* round_to_zero = NewBlock("Round to zero"); | |
+ llvm::BasicBlock* round_to_one = NewBlock("Round to one"); | |
+ llvm::BasicBlock* below_one_half = NewBlock("Below one half"); | |
+ llvm::BasicBlock* above_one_half = NewBlock("Above one half"); | |
+ llvm::BasicBlock* not_equal = NewBlock("Not equal"); | |
+ llvm::BasicBlock* round_result = NewBlock("Jump to final Round result block"); | |
+ /*if (DeoptEveryNTimes()){ | |
+ UNIMPLEMENTED(); | |
+ }*/ | |
+ llvm::Value* cmp = __ CreateFCmpOGT(llvm_double_one_half, input_reg); | |
+ __ CreateCondBr(cmp, below_one_half, above_one_half); | |
+ | |
+ __ SetInsertPoint(above_one_half); | |
+ xmm_scratch = __ CreateFAdd(llvm_double_one_half, input_reg); | |
+ llvm::Value* output_reg1 = __ CreateFPToSI(xmm_scratch, Types::i32); | |
+  // Conversion overflow is caught by the ssub_with_overflow check below. | 
+ auto type = instr->representation().IsSmi() ? Types::i64 : Types::i32; | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::ssub_with_overflow, type); | |
+ llvm::Value* params[] = { output_reg1, __ getInt32(0x1) }; | |
+ llvm::Value* call = __ CreateCall(intrinsic, params); | |
+ llvm::Value* overflow = __ CreateExtractValue(call, 1); | |
+ DeoptimizeIf(overflow, Deoptimizer::kOverflow); | |
+ __ CreateBr(round_result); | |
+ | |
+ __ SetInsertPoint(below_one_half); | |
+ cmp = __ CreateFCmpOLE(llvm_double_minus_one_half, input_reg); | |
+ __ CreateCondBr(cmp, round_to_zero, round_to_one); | |
+ | |
+ __ SetInsertPoint(round_to_one); | |
+ input_temp = __ CreateFSub(input_reg, llvm_double_minus_one_half); | |
+ llvm::Value* output_reg2 = __ CreateFPToSI(input_temp, Types::i32); | |
+ auto instr_type = instr->representation().IsSmi() ? Types::i64 : Types::i32; | |
+ llvm::Function* ssub_intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::ssub_with_overflow, instr_type); | |
+ llvm::Value* parameters[] = { output_reg2, __ getInt32(0x1) }; | |
+ llvm::Value* call_intrinsic = __ CreateCall(ssub_intrinsic, parameters); | |
+ llvm::Value* cmp_overflow = __ CreateExtractValue(call_intrinsic, 1); | |
+ DeoptimizeIf(cmp_overflow, Deoptimizer::kOverflow); | |
+ xmm_scratch = __ CreateSIToFP(output_reg2, Types::float64); | |
+ cmp = __ CreateFCmpOEQ(xmm_scratch, input_reg); | |
+ __ CreateCondBr(cmp, round_result, not_equal); | |
+ | |
+ __ SetInsertPoint(not_equal); | |
+ llvm::Value* output_reg3 = __ CreateNSWSub(output_reg2, __ getInt32(1)); | |
+ __ CreateBr(round_result); | |
+ | |
+ __ SetInsertPoint(round_to_zero); | |
+  if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
+    // Inputs in [-0.5, 0] round to -0, so test the sign bit: an ordered | 
+    // "< 0.0" compare would miss -0.0 itself. | 
+    llvm::Value* bits = __ CreateBitCast(input_reg, Types::i64); | 
+    llvm::Value* sign_mask = __ getInt64(0x8000000000000000ULL); | 
+    llvm::Value* sign = __ CreateAnd(bits, sign_mask); | 
+    llvm::Value* cmp_zero = __ CreateICmpNE(sign, __ getInt64(0)); | 
+    DeoptimizeIf(cmp_zero, Deoptimizer::kMinusZero); | 
+  } | 
+  llvm::Value* output_reg4 = __ getInt32(0); // Values in [-0.5, 0.5) round to 0. | 
+ __ CreateBr(round_result); | |
+ | |
+ __ SetInsertPoint(round_result); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::i32, 4); | |
+ phi->addIncoming(output_reg1, above_one_half); | |
+ phi->addIncoming(output_reg2, round_to_one); | |
+ phi->addIncoming(output_reg3, not_equal); | |
+ phi->addIncoming(output_reg4, round_to_zero); | |
+ instr->set_llvm_value(phi); | |
+} | |
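The ssub_with_overflow calls above double as conversion checks. On x64 an out-of-range double-to-int conversion (cvttsd2si, which FPToSI lowers to here) yields the "integer indefinite" value 0x80000000, and INT32_MIN is the only int32 for which subtracting 1 overflows, so "x - 1 overflowed" flags exactly the failed conversions. A standalone sketch using the matching GCC/Clang builtin:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t converted = INT32_MIN;  // what cvttsd2si produces when out of range
  int32_t unused;
  // Mirrors llvm.ssub.with.overflow.i32(converted, 1): the overflow bit is
  // set only when converted == INT32_MIN.
  bool overflow = __builtin_sub_overflow(converted, 1, &unused);
  std::printf("%d\n", overflow ? 1 : 0);  // 1 -> DeoptimizeIf(kOverflow) fires
  return 0;
}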
+ | |
+void LLVMChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::log, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->value())); | |
+ llvm::Value* log = __ CreateCall(intrinsic, params); | |
+ instr->set_llvm_value(log); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { | |
+ llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(module_.get(), | |
+ llvm::Intrinsic::exp, Types::float64); | |
+ std::vector<llvm::Value*> params; | |
+ params.push_back(Use(instr->value())); | |
+ llvm::Value* exp = __ CreateCall(intrinsic, params); | |
+ instr->set_llvm_value(exp); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { | |
+ switch (instr->op()) { | |
+ case kMathAbs: | |
+ DoMathAbs(instr); | |
+ break; | |
+ case kMathPowHalf: | |
+ DoMathPowHalf(instr); | |
+ break; | |
+ case kMathFloor: { | |
+ DoMathFloor(instr); | |
+ break; | |
+ } | |
+ case kMathRound: { | |
+ DoMathRound(instr); | |
+ break; | |
+ } | |
+ case kMathFround: { | |
+ //FIXME(llvm): Is this right? | |
+ llvm::Value* value = Use(instr->value()); | |
+ llvm::Value* trunc_fp = __ CreateFPTrunc(value, Types::float32); | |
+ llvm::Value* result = __ CreateFPExt(trunc_fp, Types::float64); | |
+ instr->set_llvm_value(result); | |
+ break; | |
+ } | |
+ case kMathLog: { | |
+ DoMathLog(instr); | |
+ break; | |
+ } | |
+ case kMathExp: { | |
+ DoMathExp(instr); | |
+ break; | |
+ } | |
+ case kMathSqrt: { | |
+ DoMathSqrt(instr); | |
+ break; | |
+ } | |
+ case kMathClz32: | |
+ UNIMPLEMENTED(); | |
+ default: | |
+ UNREACHABLE(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { | |
+ int env_index = instr->index(); | |
+ int spill_index = 0; | |
+ if (instr->environment()->is_parameter_index(env_index)) { | |
+ spill_index = chunk()->GetParameterStackSlot(env_index); | |
+ spill_index = -spill_index; | |
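+    // Parameters are read straight off the IR argument list; the magic 3 | 
+    // below skips the phony spill slots (rbp, rsi, rdi; cf. kPhonySpillCount). | 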
+ llvm::Function::arg_iterator it = function_->arg_begin(); | |
+ int i = 0; | |
+ while (++i < 3 + spill_index) ++it; | |
+ llvm::Value* result = it; | |
+ instr->set_llvm_value(result); | |
+ } else { | |
+ spill_index = env_index - instr->environment()->first_local_index(); | |
+ if (spill_index > LUnallocated::kMaxFixedSlotIndex) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+    if (spill_index >= 0) { | 
+      bool is_volatile = true; | 
+      llvm::Value* result = __ CreateLoad(osr_preserved_values_[spill_index], | 
+                                          is_volatile); | 
+      result = __ CreateBitOrPointerCast( | 
+          result, GetLLVMType(instr->representation())); | 
+ instr->set_llvm_value(result); | |
+ } else { | |
+      // TODO(llvm): check this case. | 
+ DCHECK(spill_index == -1); | |
+ spill_index = 1; | |
+ llvm::Function::arg_iterator it = function_->arg_begin(); | |
+ int i = 0; | |
+ while (++i < 3 + spill_index) ++it; | |
+ llvm::Value* result = it; | |
+ instr->set_llvm_value(result); | |
+ } | |
+ | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoUseConst(HUseConst* instr) { | |
+ //UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { | |
+ llvm::Value* receiver = Use(instr->receiver()); | |
+ llvm::Value* function = Use(instr->function()); | |
+ llvm::BasicBlock* insert_block = __ GetInsertBlock(); | |
+ llvm::BasicBlock* global_object = NewBlock("DoWrapReceiver Global object"); | |
+ llvm::BasicBlock* not_global_object = NewBlock("DoWrapReceiver " | |
+ "Not global object"); | |
+ llvm::BasicBlock* not_equal = NewBlock("DoWrapReceiver not_equal"); | |
+ llvm::BasicBlock* receiver_ok = NewBlock("DoWrapReceiver Receiver ok block"); | |
+ llvm::BasicBlock* dist = NewBlock("DoWrapReceiver Distance"); | |
+ llvm::BasicBlock* receiver_fail = NewBlock("DoWrapReceiver Receiver" | |
+ " fail block"); | |
+ | |
+ if (!instr->known_function()){ | |
+ llvm::Value* op = LoadFieldOperand(function, | |
+ JSFunction::kSharedFunctionInfoOffset); | |
+ llvm::Value* bit_with_byte = | |
+ __ getInt64(1 << SharedFunctionInfo::kStrictModeBitWithinByte); | |
+ llvm::Value* byte_offset = LoadFieldOperand(op, | |
+ SharedFunctionInfo::kStrictModeByteOffset); | |
+ llvm::Value* casted_offset = __ CreatePtrToInt(byte_offset, Types::i64); | |
+ llvm::Value* cmp = __ CreateICmpNE(casted_offset, bit_with_byte); | |
+ __ CreateCondBr(cmp, receiver_ok, receiver_fail); | |
+ __ SetInsertPoint(receiver_fail); | |
+ llvm::Value* native_byte_offset = LoadFieldOperand(op, | |
+ SharedFunctionInfo::kNativeByteOffset); | |
+ llvm::Value* native_bit_with_byte = | |
+ __ getInt64(1 << SharedFunctionInfo::kNativeBitWithinByte); | |
+ llvm::Value* casted_native_offset = __ CreatePtrToInt(native_byte_offset, | |
+ Types::i64); | |
+ llvm::Value* compare = __ CreateICmpNE(casted_native_offset, | |
+ native_bit_with_byte); | |
+ __ CreateCondBr(compare, receiver_ok, dist); | |
+  } else { | 
+    __ CreateBr(dist); | 
+  } | 
+ __ SetInsertPoint(dist); | |
+ | |
+ // Normal function. Replace undefined or null with global receiver. | |
+ llvm::Value* compare_root = CompareRoot(receiver, Heap::kNullValueRootIndex); | |
+ __ CreateCondBr(compare_root, global_object, not_global_object); | |
+ __ SetInsertPoint(not_global_object); | |
+ llvm::Value* comp_root = CompareRoot(receiver, | |
+ Heap::kUndefinedValueRootIndex); | |
+ __ CreateCondBr(comp_root, global_object, not_equal); | |
+ | |
+ // The receiver should be a JS object | |
+ __ SetInsertPoint(not_equal); | |
+ llvm::Value* is_smi = SmiCheck(receiver); | |
+ DeoptimizeIf(is_smi, Deoptimizer::kSmi); | |
+ llvm::Value* compare_obj = CmpObjectType(receiver, | |
+ FIRST_SPEC_OBJECT_TYPE, | |
+ llvm::CmpInst::ICMP_ULT); | |
+ DeoptimizeIf(compare_obj, Deoptimizer::kNotAJavaScriptObject); | |
+ __ CreateBr(receiver_ok); | |
+ | |
+ __ SetInsertPoint(global_object); | |
+ llvm::Value* global_receiver = LoadFieldOperand(function, | |
+ JSFunction::kContextOffset); | |
+ global_receiver = LoadFieldOperand(global_receiver, | |
+ Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)); | |
+ global_receiver = LoadFieldOperand(global_receiver, | |
+ GlobalObject::kGlobalProxyOffset); | |
+ __ CreateBr(receiver_ok); | |
+ | |
+ __ SetInsertPoint(receiver_ok); | |
+ llvm::PHINode* phi = __ CreatePHI(Types::tagged, 2); | |
+ phi->addIncoming(global_receiver, global_object); | |
+ phi->addIncoming(receiver, insert_block); | |
+ instr->set_llvm_value(phi); | |
+} | |
+ | |
+ | |
+llvm::Value* LLVMChunkBuilder::CmpObjectType(llvm::Value* heap_object, | |
+ InstanceType type, | |
+ llvm::CmpInst::Predicate predicate) { | |
+ llvm::Value* map = LoadFieldOperand(heap_object, HeapObject::kMapOffset); | |
+ llvm::Value* map_as_ptr_to_i8 = __ CreateBitOrPointerCast(map, Types::ptr_i8); | |
+ llvm::Value* object_type_addr = ConstructAddress( | |
+ map_as_ptr_to_i8, Map::kInstanceTypeOffset - kHeapObjectTag); | |
+ llvm::Value* object_type = __ CreateLoad(object_type_addr); | |
+ llvm::Value* expected_type = __ getInt8(static_cast<int8_t>(type)); | |
+ return __ CreateICmp(predicate, object_type, expected_type); | |
+} | |
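The "- kHeapObjectTag" in ConstructAddress above is the usual untagging step: V8 heap pointers carry tag 1 in the low bit, so a field's real address is the tagged pointer plus the field offset minus one. A minimal sketch of the same arithmetic; only kHeapObjectTag == 1 is V8's actual convention, the rest is illustrative:

#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // low bit set on V8 heap pointers

// Reads a one-byte field (e.g. a map's InstanceType) off a tagged pointer.
uint8_t LoadByteField(uintptr_t tagged_ptr, int field_offset) {
  const uint8_t* raw =
      reinterpret_cast<const uint8_t*>(tagged_ptr - kHeapObjectTag);
  return raw[field_offset];
}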
+ | |
+void LLVMChunkBuilder::DoCheckArrayBufferNotNeutered( | |
+ HCheckArrayBufferNotNeutered* instr) { | |
+ llvm::Value* view = Use(instr->value()); | |
+ llvm::Value* array_offset = LoadFieldOperand(view, | |
+ JSArrayBufferView::kBufferOffset); | |
+ llvm::Value* bit_field_offset = LoadFieldOperand(array_offset, | |
+ JSArrayBuffer::kBitFieldOffset); | |
+ bit_field_offset = __ CreateBitOrPointerCast(bit_field_offset, Types::i64); | |
+ llvm::Value* shift = __ getInt64(1 << JSArrayBuffer::WasNeutered::kShift); | |
+ llvm::Value* test = __ CreateAnd(bit_field_offset, shift); | |
+ llvm::Value* cmp = __ CreateICmpNE(test, __ getInt64(0)); | |
+ DeoptimizeIf(cmp, Deoptimizer::kOutOfBounds); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoLoadGlobalViaContext(HLoadGlobalViaContext* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) { | |
+  // TODO(llvm): not tested; reached from 3d-cube.js (function MMulti). | 
+ llvm::BasicBlock* insert = __ GetInsertBlock(); | |
+ llvm::BasicBlock* deferred = NewBlock("MaybeGrowElements deferred"); | |
+ llvm::BasicBlock* done = NewBlock("MaybeGrowElements done"); | |
+ llvm::Value* result = Use(instr->elements()); | |
+ llvm::Value* result_from_deferred = nullptr; | |
+ HValue* key = instr->key(); | |
+ HValue* current_capacity = instr->current_capacity(); | |
+ DCHECK(instr->key()->representation().IsInteger32()); | |
+ DCHECK(instr->current_capacity()->representation().IsInteger32()); | |
+ if (key->IsConstant() && current_capacity->IsConstant()) { | |
+ UNIMPLEMENTED(); | |
+ } else if (key->IsConstant()) { | |
+ int32_t constant_key = (HConstant::cast(key))->Integer32Value(); | |
+ llvm::Value* capacity = Use(instr->current_capacity()); | |
+ llvm::Value* cmp = __ CreateICmpSLE(capacity, __ getInt32(constant_key)); | |
+ __ CreateCondBr(cmp, deferred, done); | |
+ } else if (current_capacity->IsConstant()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ llvm::Value* cmp = __ CreateICmpSGE(Use(key), Use(current_capacity)); | |
+ __ CreateCondBr(cmp, deferred, done); | |
+ } | |
+ | |
+ __ SetInsertPoint(deferred); | |
+ std::vector<llvm::Value*> params; | |
+ //PushSafepointRegistersScope scope(this); | |
+ if (instr->object()->IsConstant()) { | |
+ HConstant* constant_object = HConstant::cast(instr->object()); | |
+ if (instr->object()->representation().IsSmi()) { | |
+ UNIMPLEMENTED(); | |
+ } else { | |
+ Handle<Object> handle_value = constant_object->handle(isolate()); | |
+ llvm::Value* object = MoveHeapObject(handle_value); | |
+ params.push_back(object); | |
+ } | |
+ } else { | |
+ llvm::Value* object = Use(instr->object()); | |
+ params.push_back(object); | |
+ } | |
+ | |
+ if (key->IsConstant()) { | |
+ HConstant* constant = HConstant::cast(key); | |
+ Smi* smi_key = Smi::FromInt(constant->Integer32Value()); | |
+ llvm::Value* smi = ValueFromSmi(smi_key); | |
+ params.push_back(smi); | |
+ } else { | |
+ llvm::Value* smi_key = Integer32ToSmi(key); | |
+ params.push_back(smi_key); | |
+ } | |
+ | |
+ GrowArrayElementsStub stub(isolate(), instr->is_js_array(), | |
+ instr->kind()); | |
+ | |
+ AllowHandleAllocation allow_handle; | |
+ AllowHeapAllocation allow_heap; | |
+ result_from_deferred = CallCode(stub.GetCode(), | |
+ llvm::CallingConv::X86_64_V8_S12, | |
+ params); | |
+// RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0); | |
+ // __ StoreToSafepointRegisterSlot(result, result); | |
+ llvm::Value* is_smi = SmiCheck(result_from_deferred); | |
+ DeoptimizeIf(is_smi, Deoptimizer::kSmi); | |
+ __ CreateBr(done); | |
+ | |
+ __ SetInsertPoint(done); | |
+ DCHECK(result->getType() == result_from_deferred->getType()); | |
+ llvm::PHINode* phi = __ CreatePHI(result->getType(), 2); | |
+ phi->addIncoming(result, insert); | |
+ phi->addIncoming(result_from_deferred, deferred); | |
+ instr->set_llvm_value(phi); | |
+} | |
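Integer32ToSmi above is x64 smi tagging: the 32-bit payload simply moves into the upper half of the word, since kSmiShift == kSmiTagSize + kSmiShiftSize == 32 (the same constant llvm-chunk.h defines below). A standalone sketch of both directions:

#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // kSmiTagSize(1) + kSmiShiftSize(31) on x64

intptr_t Integer32ToSmi(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiShift;  // tag bits stay zero
}

int32_t SmiToInteger32(intptr_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);  // arithmetic shift back
}

int main() {
  assert(SmiToInteger32(Integer32ToSmi(-7)) == -7);
  return 0;
}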
+ | |
+void LLVMChunkBuilder::DoPrologue(HPrologue* instr) { | |
+ if (info_->num_heap_slots() > 0) { | |
+ UNIMPLEMENTED(); | |
+ } | |
+} | |
+ | |
+void LLVMChunkBuilder::DoStoreGlobalViaContext(HStoreGlobalViaContext* instr) { | |
+ UNIMPLEMENTED(); | |
+} | |
+ | |
+void LLVMEnvironment::AddValue(llvm::Value* value, | |
+ Representation representation, | |
+ bool is_uint32) { | |
+ DCHECK(value->getType() == LLVMChunkBuilder::GetLLVMType(representation)); | |
+ values_.Add(value, zone()); | |
+ if (representation.IsSmiOrTagged()) { | |
+ DCHECK(!is_uint32); | |
+ is_tagged_.Add(values_.length() - 1, zone()); | |
+ } | |
+ | |
+ if (is_uint32) { | |
+ is_uint32_.Add(values_.length() - 1, zone()); | |
+ } | |
+ | |
+ if (representation.IsDouble()) { | |
+ is_double_.Add(values_.length() - 1, zone()); | |
+ } | |
+} | |
+ | |
+void LLVMRelocationData::DumpSafepointIds() { | |
+#ifdef DEBUG | |
+ std::cerr << "< Safepoint ids begin: \n"; | |
+ for (GrowableBitVector::Iterator it(&is_safepoint_, zone_); | |
+ !it.Done(); | |
+ it.Advance()) { | |
+ std::cerr << it.Current() << " "; | |
+ } | |
+ std::cerr << "Safepoint ids end >\n"; | |
+#endif | |
+} | |
+ | |
+#undef __ | |
+ | |
+} } // namespace v8::internal | |
diff --git a/src/llvm/llvm-chunk.h b/src/llvm/llvm-chunk.h | |
new file mode 100644 | |
index 0000000..c4ec778 | |
--- /dev/null | |
+++ b/src/llvm/llvm-chunk.h | |
@@ -0,0 +1,869 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#ifndef V8_LLVM_CHUNK_H_ | |
+#define V8_LLVM_CHUNK_H_ | |
+ | |
+#include "llvm-headers.h" | |
+ | |
+#include "src/hydrogen.h" | |
+#include "src/hydrogen-instructions.h" | |
+#include "src/handles.h" | |
+#include "src/x64/lithium-codegen-x64.h" | |
+#include "src/lithium.h" | |
+#include "llvm-stackmaps.h" | |
+#include "pass-rewrite-safepoints.h" | |
+#include "mcjit-memory-manager.h" | |
+#include "src/base/division-by-constant.h" | |
+ | |
+#include <memory> | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+// TODO(llvm): Move to a separate file. Better yet, a helper like this | 
+// probably already exists somewhere in V8; find it and remove this class. | 
+class IntHelper : public AllStatic { | |
+ public: | |
+ // FIXME(llvm): consider int != int32 | |
+ static bool IsInt(uint64_t x) { return is_int32(x); } | |
+ static int AsInt(uint64_t x) { | |
+ DCHECK(IsInt(x)); | |
+ return static_cast<int>(x); | |
+ } | |
+ static bool IsInt(long x) { return is_int32(x); } | |
+ static int AsInt(long x) { | |
+ DCHECK(IsInt(x)); | |
+ return static_cast<int>(x); | |
+ } | |
+  static uint32_t AsUInt32(uint64_t x) { | 
+    DCHECK(is_uint32(x)); | 
+    return static_cast<uint32_t>(x); | 
+  } | 
+  static int32_t AsInt32(int64_t x) { | 
+    DCHECK(is_int32(x)); | 
+    return static_cast<int32_t>(x); | 
+  } | 
+}; | |
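IntHelper is checked narrowing: assert that the value fits, then cast. A standalone equivalent, with assert standing in for V8's DCHECK:

#include <cassert>
#include <cstdint>
#include <limits>

int AsInt(uint64_t x) {
  assert(x <= static_cast<uint64_t>(std::numeric_limits<int32_t>::max()));
  return static_cast<int>(x);
}

int main() { return AsInt(42) == 42 ? 0 : 1; }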
+ | |
+// ZoneObject is probably a better approach than the fancy | |
+// C++11 smart pointers which I have been using all over the place. | |
+// So TODO(llvm): more zone objects! | |
+struct DeoptIdMap { | |
+ int32_t patchpoint_id; | |
+ int bailout_id; | |
+}; | |
+class LLVMRelocationData : public ZoneObject { | |
+ public: | |
+ union ExtendedInfo { | |
+ bool cell_extended; | |
+ }; | |
+ | |
+ using RelocMap = std::map<uint64_t, std::pair<RelocInfo, ExtendedInfo>>; | |
+ | |
+ LLVMRelocationData(Zone* zone) | |
+ : reloc_map_(), | |
+ last_patchpoint_id_(-1), | |
+ is_reloc_(8, zone), | |
+ is_reloc_with_nop_(8, zone), | |
+ is_deopt_(8, zone), | |
+ is_safepoint_(8, zone), | |
+ deopt_reasons_(), | |
+ num_safepoint_function_args_(), | |
+ is_transferred_(false), | |
+ zone_(zone) {} | |
+ | |
+ void Add(RelocInfo rinfo, ExtendedInfo ex_info) { | |
+ DCHECK(!is_transferred_); | |
+ reloc_map_[rinfo.data()] = std::make_pair(rinfo, ex_info); | |
+ } | |
+ | |
+ RelocMap& reloc_map() { | |
+ return reloc_map_; | |
+ } | |
+ | |
+ int32_t GetNextUnaccountedPatchpointId(); | |
+ int32_t GetNextDeoptPatchpointId(); | |
+ int32_t GetNextSafepointPatchpointId(size_t num_passed_args); | |
+ int32_t GetNextRelocPatchpointId(size_t num_passed_args = -1, | |
+ bool is_safepoint = false); | |
+ int32_t GetNextRelocNopPatchpointId(size_t num_passed_args = -1, | |
+ bool is_safepoint = false); | |
+ int32_t GetNextDeoptRelocPatchpointId(); | |
+ size_t GetNumSafepointFuncionArgs(int32_t patchpoint_id); | |
+ Deoptimizer::DeoptReason GetDeoptReason(int32_t patchpoint_id); | |
+ void SetDeoptReason(int32_t patchpoint_id, Deoptimizer::DeoptReason reason); | |
+ int GetBailoutId(int32_t patchpoint_id); | |
+ void SetBailoutId(int32_t patchpoint_id, int bailout_id); | |
+ bool IsPatchpointIdDeopt(int32_t patchpoint_id); | |
+ bool IsPatchpointIdSafepoint(int32_t patchpoint_id); | |
+ bool IsPatchpointIdReloc(int32_t patchpoint_id); | |
+ bool IsPatchpointIdRelocNop(int32_t patchpoint_id); | |
+ | |
+ void transfer() { is_transferred_ = true; } | |
+ | |
+ void DumpSafepointIds(); | |
+ | |
+ private: | |
+ // TODO(llvm): re-think the design and probably use ZoneHashMap | |
+ RelocMap reloc_map_; | |
+ int32_t last_patchpoint_id_; | |
+ // FIXME(llvm): not totally sure those belong here: | |
+ // Patchpoint ids belong to one (or more) of the following: | |
+ GrowableBitVector is_reloc_; | |
+ GrowableBitVector is_reloc_with_nop_; | |
+ ZoneList<DeoptIdMap> is_deopt_; | |
+ GrowableBitVector is_safepoint_; | |
+ // FIXME(llvm): make it a ZoneHashMap | |
+ std::map<int32_t, Deoptimizer::DeoptReason> deopt_reasons_; | |
+ std::map<int32_t, size_t> num_safepoint_function_args_; | |
+ bool is_transferred_; | |
+ Zone* zone_; | |
+}; | |
+ | |
+// TODO(llvm): move this class to a separate file. Or, better, 2 files | |
+class LLVMGranularity final { | |
+ public: | |
+ static LLVMGranularity& getInstance() { | |
+ static LLVMGranularity instance; | |
+ return instance; | |
+ } | |
+ | |
+ // TODO(llvm): | |
+// ~LLVMGranularity() { | |
+// llvm::llvm_shutdown(); | |
+// } | |
+ | |
+ LLVMContext& context() { return context_; } | |
+ MCJITMemoryManager* memory_manager_ref() { return memory_manager_ref_; } | |
+ | |
+ std::unique_ptr<llvm::Module> CreateModule(std::string name = "") { | |
+ if ("" == name) { | |
+ name = GenerateName(); | |
+ } | |
+ return llvm::make_unique<llvm::Module>(name, context_); | |
+ } | |
+ | |
+ void AddModule(std::unique_ptr<llvm::Module> module) { | |
+ if (!engine_) { | |
+ std::vector<std::string> machine_attributes; | |
+ SetMachineAttributes(machine_attributes); | |
+ | |
+ std::unique_ptr<MCJITMemoryManager> manager = | |
+ MCJITMemoryManager::Create(); | |
+ memory_manager_ref_ = manager.get(); // non-owning! | |
+ | |
+ llvm::ExecutionEngine* raw = llvm::EngineBuilder(std::move(module)) | |
+ .setMCJITMemoryManager(std::move(manager)) | |
+ .setErrorStr(&err_str_) | |
+ .setEngineKind(llvm::EngineKind::JIT) | |
+ .setMAttrs(machine_attributes) | |
+ .setMCPU("x86-64") | |
+ .setRelocationModel(llvm::Reloc::PIC_) // position independent code | |
+ // A good read on code models can be found here: | |
+ // eli.thegreenplace.net/2012/01/03/understanding-the-x64-code-models | |
+ // We use a modified Large code model, which uses rip-relative | |
+ // addressing for jump tables. | |
+ .setCodeModel(llvm::CodeModel::Large) | |
+ .setOptLevel(llvm::CodeGenOpt::Aggressive) // backend opt level | |
+ .create(); | |
+ engine_ = std::unique_ptr<llvm::ExecutionEngine>(raw); | |
+ CHECK(engine_); | |
+ } else { | |
+ engine_->addModule(std::move(module)); | |
+ } | |
+ // Finalize each time after adding a new module | |
+ // (assuming the added module is constructed and won't change) | |
+ engine_->finalizeObject(); | |
+ } | |
+ | |
+ void OptimizeFunciton(llvm::Module* module, llvm::Function* function) { | |
+ // TODO(llvm): 1). Instead of using -O3 optimizations, add the | |
+ // appropriate passes manually | |
+ // TODO(llvm): 2). I didn't manage to make use of new PassManagers. | |
+ // llvm::legacy:: things should probably be removed with time. | |
+ // But for now even the llvm optimizer (llvm/tools/opt/opt.cpp) uses them. | |
+ // TODO(llvm): 3). (Probably could be resolved easily when 2. is done) | |
+ // for now we set up the passes for each module (and each function). | |
+ // It would be much nicer if we could just set the passes once | |
+ // and then in OptimizeFunciton() and OptimizeModule() simply run them. | |
+ llvm::legacy::FunctionPassManager pass_manager(module); | |
+ pass_manager_builder_.populateFunctionPassManager(pass_manager); | |
+ pass_manager.doInitialization(); | |
+ pass_manager.run(*function); | |
+ pass_manager.doFinalization(); | |
+ } | |
+ | |
+ void OptimizeModule(llvm::Module* module) { | |
+ // TODO(llvm): see OptimizeFunciton TODOs (ditto) | |
+ llvm::legacy::PassManager pass_manager; | |
+ pass_manager_builder_.populateModulePassManager(pass_manager); | |
+ pass_manager.run(*module); | |
+ } | |
+ | |
+ uint64_t GetFunctionAddress(int id) { | |
+ DCHECK(engine_); | |
+ return engine_->getFunctionAddress(std::to_string(id)); | |
+ } | |
+ | |
+ void Err() { | |
+ std::cerr << err_str_ << std::endl; | |
+ } | |
+ | |
+ // TODO(llvm): move to a separate file | |
+ void Disass(Address start, Address end) { | |
+ auto triple = x64_target_triple; | |
+ std::string err; | |
+ const llvm::Target* target = llvm::TargetRegistry::lookupTarget(triple, | |
+ err); | |
+ DCHECK(target); | |
+ std::unique_ptr<llvm::MCRegisterInfo> mri(target->createMCRegInfo(triple)); | |
+ DCHECK(mri); | |
+ std::unique_ptr<llvm::MCAsmInfo> mai(target->createMCAsmInfo(*mri, triple)); | |
+ DCHECK(mai); | |
+ std::unique_ptr<llvm::MCInstrInfo> mii(target->createMCInstrInfo()); | |
+ DCHECK(mii); | |
+ std::string feature_str; | |
+ const llvm::StringRef cpu = ""; | |
+ std::unique_ptr<llvm::MCSubtargetInfo> sti( | |
+ target->createMCSubtargetInfo(triple, cpu, feature_str)); | |
+ DCHECK(sti); | |
+ auto intel_syntax = 1; | |
+ inst_printer_ = std::unique_ptr<llvm::MCInstPrinter>( | |
+ target->createMCInstPrinter(llvm::Triple(llvm::Triple::normalize(triple)), | |
+ intel_syntax, *mai, *mii, *mri)); | |
+ inst_printer_->setPrintImmHex(true); | |
+ DCHECK(inst_printer_); | |
+ llvm::MCContext mc_context(mai.get(), mri.get(), nullptr); | |
+ std::unique_ptr<llvm::MCDisassembler> disasm( | |
+ target->createMCDisassembler(*sti, mc_context)); | |
+ DCHECK(disasm); | |
+ | |
+ auto pos = start; | |
+ while (pos < end) { | |
+ llvm::MCInst inst; | |
+ uint64_t size; | |
+ auto address = 0; | |
+ | |
+ llvm::MCDisassembler::DecodeStatus s = disasm->getInstruction( | |
+ inst /* out */, size /* out */, llvm::ArrayRef<uint8_t>(pos, end), | |
+ address, llvm::nulls(), llvm::nulls()); | |
+ if (s == llvm::MCDisassembler::Fail) { | |
+ std::cerr << "disassembler failed at " | |
+ << reinterpret_cast<void*>(pos) << std::endl; | |
+ break; | |
+ } | |
+ llvm::errs() << pos << "\t"; | |
+ inst_printer_->printInst(&inst, llvm::errs(), "", *sti); | |
+ llvm::errs() << "\n"; | |
+ pos += size; | |
+ } | |
+ } | |
+ | |
+ int CallInstructionSizeAt(Address pc); | |
+ std::vector<RelocInfo> Patch(Address, Address, LLVMRelocationData::RelocMap&); | |
+ | |
+ static const char* x64_target_triple; | |
+ private: | |
+ LLVMContext context_; | |
+ llvm::PassManagerBuilder pass_manager_builder_; | |
+ std::unique_ptr<llvm::ExecutionEngine> engine_; | |
+ std::unique_ptr<llvm::MCInstPrinter> inst_printer_; | |
+ int count_; | |
+ MCJITMemoryManager* memory_manager_ref_; // non-owning ptr | |
+ std::string err_str_; | |
+ | |
+ LLVMGranularity() | |
+ : context_(), | |
+ pass_manager_builder_(), | |
+ engine_(nullptr), | |
+ inst_printer_(nullptr), | |
+ count_(0), | |
+ memory_manager_ref_(nullptr), | |
+ err_str_() { | |
+ llvm::InitializeNativeTarget(); | |
+ llvm::InitializeNativeTargetAsmPrinter(); | |
+ llvm::InitializeNativeTargetAsmParser(); | |
+ llvm::InitializeNativeTargetDisassembler(); | |
+// llvm::initializeCodeGen(*llvm::PassRegistry::getPassRegistry()); | |
+ pass_manager_builder_.OptLevel = 3; // -O3 | |
+ } | |
+ | |
+ std::string GenerateName() { | |
+ return std::to_string(count_++); | |
+ } | |
+ | |
+ void SetMachineAttributes(std::vector<std::string>& machine_attributes) { | |
+ // TODO(llvm): add desired machine attributes. See llc -mattr=help | |
+    // FIXME(llvm): for each attribute, check whether the corresponding | 
+    // CPU feature is actually supported. | 
+ for (auto attr : { | |
+ "sse","sse2","sse4.1","sse4.2", | |
+ "sse4a", "ssse3", "aes", "avx", "avx2" }) { | |
+ machine_attributes.push_back(attr); | |
+ } | |
+ } | |
+ | |
+ DISALLOW_COPY_AND_ASSIGN(LLVMGranularity); | |
+}; | |
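Taken together, this singleton is a thin facade over MCJIT: create a module, fill it with IR, hand it to the engine, then resolve compiled functions by a stringified integer id (see GetFunctionAddress above). A hedged usage sketch; how function ids get assigned is outside this excerpt, and the IR-building step is elided.

// Sketch only; assumes the LLVMGranularity class above.
auto& g = LLVMGranularity::getInstance();
std::unique_ptr<llvm::Module> module = g.CreateModule();  // auto-named "0", "1", ...
// ... build a function named after its id into *module with an IRBuilder ...
g.AddModule(std::move(module));           // creates the engine lazily, finalizes
uint64_t entry = g.GetFunctionAddress(0); // engine lookup of the function "0"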
+ | |
+struct Types final : public AllStatic { | |
+ static llvm::Type* smi; | |
+ static llvm::Type* ptr_smi; | |
+ static llvm::Type* tagged; | |
+ static llvm::PointerType* ptr_tagged; | |
+ | |
+ static llvm::Type* i8; | |
+ static llvm::Type* i16; | |
+ static llvm::Type* i32; | |
+ static llvm::Type* i64; | |
+ static llvm::Type* float32; | |
+ static llvm::Type* float64; | |
+ | |
+ static llvm::PointerType* ptr_i8; | |
+ static llvm::PointerType* ptr_i16; | |
+ static llvm::PointerType* ptr_i32; | |
+ static llvm::PointerType* ptr_i64; | |
+ static llvm::PointerType* ptr_float32; | |
+ static llvm::PointerType* ptr_float64; | |
+ | |
+ static void Init(llvm::IRBuilder<>* ir_builder) { | |
+ i8 = ir_builder->getInt8Ty(); | |
+ i16 = ir_builder->getInt16Ty(); | |
+ i32 = ir_builder->getInt32Ty(); | |
+ i64 = ir_builder->getInt64Ty(); | |
+ float32 = ir_builder->getFloatTy(); | |
+ float64 = ir_builder->getDoubleTy(); | |
+ | |
+ auto address_space = 0; | |
+ ptr_i8 = ir_builder->getInt8PtrTy(); | |
+ ptr_i16 = i16->getPointerTo(); | |
+ ptr_i32 = llvm::PointerType::get(ir_builder->getInt32Ty(), address_space); | |
+ ptr_i64 = llvm::PointerType::get(ir_builder->getInt64Ty(), address_space); | |
+ ptr_float32 = llvm::PointerType::get(ir_builder->getFloatTy(), address_space); | |
+ ptr_float64 = llvm::PointerType::get(ir_builder->getDoubleTy(), | |
+ address_space); | |
+ tagged = ptr_i8; | |
+ ptr_tagged = ptr_i8->getPointerTo(); | |
+ smi = i64; | |
+ ptr_smi = smi->getPointerTo(); | |
+ } | |
+}; | |
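The table above fixes the LLVM view of V8 values: a tagged value is modeled as i8* (which, given the statepoint machinery elsewhere in this patch, presumably keeps GC-managed values visible as pointers), while a smi is a plain i64. A tiny standalone check of the same mapping:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"

int main() {
  llvm::LLVMContext ctx;
  llvm::IRBuilder<> builder(ctx);
  llvm::Type* tagged = builder.getInt8PtrTy();      // Types::tagged
  llvm::Type* smi = builder.getInt64Ty();           // Types::smi
  llvm::Type* ptr_tagged = tagged->getPointerTo();  // Types::ptr_tagged
  return (tagged->isPointerTy() && smi->isIntegerTy(64) &&
          ptr_tagged->isPointerTy()) ? 0 : 1;
}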
+ | |
+class LLVMEnvironment final : public ZoneObject { | |
+ public: | |
+ LLVMEnvironment(Handle<JSFunction> closure, | |
+ FrameType frame_type, | |
+ BailoutId ast_id, | |
+ int parameter_count, | |
+ int argument_count, | |
+ int value_count, | |
+ LLVMEnvironment* outer, | |
+ HEnterInlined* entry, | |
+ Zone* zone) | |
+ : closure_(closure), | |
+ frame_type_(frame_type), | |
+ arguments_stack_height_(argument_count), | |
+ deoptimization_index_(Safepoint::kNoDeoptimizationIndex), | |
+ translation_index_(-1), | |
+ ast_id_(ast_id), | |
+ translation_size_(value_count), | |
+ parameter_count_(parameter_count), | |
+ pc_offset_(-1), | |
+ values_(value_count, zone), | |
+ is_tagged_(value_count, zone), | |
+ is_uint32_(value_count, zone), | |
+ is_double_(value_count, zone), | |
+ object_mapping_(0, zone), | |
+ outer_(outer), | |
+ entry_(entry), | |
+ zone_(zone), | |
+ has_been_used_(false) { } | |
+ | |
+ Handle<JSFunction> closure() const { return closure_; } | |
+ FrameType frame_type() const { return frame_type_; } | |
+ int arguments_stack_height() const { return arguments_stack_height_; } | |
+ LLVMEnvironment* outer() const { return outer_; } | |
+ HEnterInlined* entry() { return entry_; } | |
+ const ZoneList<llvm::Value*>* values() const { return &values_; } | |
+ BailoutId ast_id() const { return ast_id_; } | |
+ int translation_size() const { return translation_size_; } | |
+ int parameter_count() const { return parameter_count_; } | |
+ Zone* zone() const { return zone_; } | |
+ | |
+ // Marker value indicating a de-materialized object. | |
+ static llvm::Value* materialization_marker() { return nullptr; } | |
+ | |
+ bool has_been_used() const { return has_been_used_; } | |
+ void set_has_been_used() { has_been_used_ = true; } | |
+ | |
+ void AddValue(llvm::Value* value, | |
+ Representation representation, | |
+ bool is_uint32); | |
+ | |
+ bool HasTaggedValueAt(int index) const { | |
+ return is_tagged_.Contains(index); | |
+ } | |
+ | |
+ bool HasUint32ValueAt(int index) const { | |
+ return is_uint32_.Contains(index); | |
+ } | |
+ | |
+ bool HasDoubleValueAt(int index) const { | |
+ return is_double_.Contains(index); | |
+ } | |
+ | |
+ void Register(int deoptimization_index, | |
+ int translation_index, | |
+ int pc_offset) { | |
+ DCHECK(!HasBeenRegistered()); | |
+ deoptimization_index_ = deoptimization_index; | |
+ translation_index_ = translation_index; | |
+ pc_offset_ = pc_offset; | |
+ } | |
+ bool HasBeenRegistered() const { | |
+ return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex; | |
+ } | |
+ | |
+ ~LLVMEnvironment() { // FIXME(llvm): remove unused fields. | |
+ USE(pc_offset_); | |
+ } | |
+ | |
+ private: | |
+ Handle<JSFunction> closure_; | |
+ FrameType frame_type_; | |
+ int arguments_stack_height_; | |
+ int deoptimization_index_; | |
+ int translation_index_; | |
+ BailoutId ast_id_; | |
+ int translation_size_; | |
+ int parameter_count_; | |
+ int pc_offset_; | |
+ | |
+ // Value array: [parameters] [locals] [expression stack] [de-materialized]. | |
+ // |>--------- translation_size ---------<| | |
+ ZoneList<llvm::Value*> values_; | |
+ GrowableBitVector is_tagged_; | |
+ GrowableBitVector is_uint32_; | |
+ GrowableBitVector is_double_; | |
+ | |
+ // Map with encoded information about materialization_marker operands. | |
+ ZoneList<uint32_t> object_mapping_; | |
+ | |
+ LLVMEnvironment* outer_; | |
+ HEnterInlined* entry_; | |
+ Zone* zone_; | |
+ bool has_been_used_; | |
+}; | |
+ | |
+static bool MatchFunForInts(void* key1, void* key2) { | |
+ return *static_cast<int32_t*>(key1) == *static_cast<int32_t*>(key2); | |
+} | |
+ | |
+// TODO(llvm): LLVMDeoptData and LLVMRelocationData should probably be merged. | |
+class LLVMDeoptData { | |
+ public: | |
+ LLVMDeoptData(Zone* zone) | |
+ : deoptimizations_(MatchFunForInts, | |
+ ZoneHashMap::kDefaultHashMapCapacity, | |
+ ZoneAllocationPolicy(zone)), | |
+ reverse_deoptimizations_(), | |
+ translations_(zone), | |
+ deoptimization_literals_(8, zone), | |
+ zone_(zone) {} | |
+ | |
+ void Add(LLVMEnvironment* environment, int32_t patchpoint_id); | |
+ LLVMEnvironment* GetEnvironmentByPatchpointId(int32_t patchpoint_id); | |
+ int32_t GetPatchpointIdByEnvironment(LLVMEnvironment* env); | |
+ | |
+ TranslationBuffer& translations() { return translations_; } | |
+ ZoneList<Handle<Object> >& deoptimization_literals() { | |
+ return deoptimization_literals_; | |
+ } | |
+ | |
+ int DeoptCount() { return deoptimizations_.occupancy(); } | |
+ | |
+ int DefineDeoptimizationLiteral(Handle<Object> literal); | |
+ | |
+ private: | |
+ void* GetKey(int32_t patchpoint_id); | |
+ uint32_t GetHash(int32_t patchpoint_id); | |
+ // Patchpoint_id -> LLVMEnvironment* | |
+ ZoneHashMap deoptimizations_; | |
+ // LLVMEnvironment* -> Patchpoint_id | |
+ // FIXME(llvm): consistency: this one is stdmap and the one above is ZoneHMap. | |
+ std::map<LLVMEnvironment*, int32_t> reverse_deoptimizations_; | |
+ TranslationBuffer translations_; | |
+ ZoneList<Handle<Object> > deoptimization_literals_; | |
+ | |
+ Zone* zone_; | |
+}; | |
+ | |
+class LLVMChunk final : public LowChunk { | |
+ public: | |
+ virtual ~LLVMChunk(); | |
+ LLVMChunk(CompilationInfo* info, HGraph* graph) | |
+ : LowChunk(info, graph), | |
+ llvm_function_id_(-1), | |
+ reloc_data_(nullptr), | |
+ deopt_data_(nullptr), | |
+ masm_(info->isolate(), nullptr, 0), | |
+ target_index_for_ppid_(), | |
+ deopt_target_offset_for_ppid_(), | |
+ inlined_functions_(1, info->zone()) {} | |
+ | |
+ using PpIdToIndexMap = std::map<int32_t, uint32_t>; | |
+ using PpIdToOffsetMap = std::map<int32_t, std::ptrdiff_t>; | |
+ | |
+ static LLVMChunk* NewChunk(HGraph *graph); | |
+ | |
+ Handle<Code> Codegen() override; | |
+ | |
+ void set_llvm_function_id(int id) { llvm_function_id_ = id; } | |
+ int llvm_function_id() { return llvm_function_id_; } | |
+ | |
+ const ZoneList<Handle<SharedFunctionInfo>>& inlined_functions() const { | |
+ return inlined_functions_; | |
+ } | |
+ | |
+ void set_deopt_data(std::unique_ptr<LLVMDeoptData> deopt_data) { | |
+ deopt_data_ = std::move(deopt_data); | |
+ } | |
+ void set_reloc_data(LLVMRelocationData* reloc_data) { | |
+ reloc_data_ = reloc_data; | |
+ reloc_data->DumpSafepointIds(); | |
+ reloc_data->transfer(); | |
+ } | |
+ Assembler& masm() { return masm_; } | |
+ PpIdToIndexMap& target_index_for_ppid() { | |
+ return target_index_for_ppid_; | |
+ } | |
+ PpIdToOffsetMap& deopt_target_offset_for_ppid() { | |
+ return deopt_target_offset_for_ppid_; | |
+ } | |
+ | |
+ void AddInlinedFunction(Handle<SharedFunctionInfo> closure) { | |
+ inlined_functions_.Add(closure, zone()); | |
+ } | |
+ int GetParameterStackSlot(int index) const; | |
+ | |
+ private: | |
+ static const int kStackSlotSize = kPointerSize; | |
+ static const int kPhonySpillCount = 3; // rbp, rsi, rdi | |
+ | |
+ static int SpilledCount(const StackMaps& stackmaps); | |
+ | |
+ std::vector<RelocInfo> SetUpRelativeCalls(Address start, | |
+ const StackMaps& stackmaps); | |
+ StackMaps GetStackMaps(); | |
+ void SetUpDeoptimizationData(Handle<Code> code, StackMaps& stackmaps); | |
+ void EmitSafepointTable(Assembler* code_desc, | |
+ StackMaps& stackmaps, | |
+ Address instruction_start); | |
+ Vector<byte> GetFullRelocationInfo( | |
+ CodeDesc& code_desc, | |
+ const std::vector<RelocInfo>& reloc_data_from_patchpoints); | |
+ // Returns translation index of the newly generated translation | |
+ int WriteTranslationFor(LLVMEnvironment* env, const StackMaps& stackmaps); | |
+ void WriteTranslation(LLVMEnvironment* environment, | |
+ Translation* translation, | |
+ const StackMaps& stackmaps, | |
+ int32_t patchpoint_id, | |
+ int start_index); | |
+ void AddToTranslation(LLVMEnvironment* environment, | |
+ Translation* translation, | |
+                        llvm::Value* op, | 
+ StackMaps::Location& location, | |
+ const std::vector<StackMaps::Constant> constants, | |
+ bool is_tagged, | |
+ bool is_uint32, | |
+ bool is_double, | |
+ int* object_index_pointer, | |
+ int* dematerialized_index_pointer); | |
+ | |
+ int llvm_function_id_; | |
+ // Ownership gets transferred from LLVMChunkBuilder | |
+ LLVMRelocationData* reloc_data_; | |
+ // Ownership gets transferred from LLVMChunkBuilder | |
+ std::unique_ptr<LLVMDeoptData> deopt_data_; | |
+ // FIXME(llvm): memory leak. Assembler is Malloced and doesn't die either. | |
+ Assembler masm_; | |
+ // FIXME(llvm): memory leak | |
+ // (this map allocates keys on the heap and doesn't die). | |
+ // Map patchpointId -> index in masm_.code_targets_ | |
+ PpIdToIndexMap target_index_for_ppid_; | |
+ PpIdToOffsetMap deopt_target_offset_for_ppid_; | |
+ // TODO(llvm): hoist to base class. | |
+ ZoneList<Handle<SharedFunctionInfo>> inlined_functions_; | |
+}; | |
+ | |
+class LLVMChunkBuilder final : public LowChunkBuilderBase { | |
+ public: | |
+ LLVMChunkBuilder(CompilationInfo* info, HGraph* graph) | |
+ : LowChunkBuilderBase(info, graph), | |
+ current_instruction_(nullptr), | |
+ current_block_(nullptr), | |
+ next_block_(nullptr), | |
+ module_(nullptr), | |
+ function_(nullptr), | |
+ llvm_ir_builder_(nullptr), | |
+ deopt_data_(llvm::make_unique<LLVMDeoptData>(info->zone())), | |
+ reloc_data_(nullptr), | |
+ pending_pushed_args_(4, info->zone()), | |
+ osr_preserved_values_(4, info->zone()), | |
+ emit_debug_code_(FLAG_debug_code), | |
+ volatile_zero_address_(nullptr), | |
+ global_receiver_(nullptr), | |
+ pointers_(), | |
+ number_of_pointers_(-1) { | |
+ reloc_data_ = new(zone()) LLVMRelocationData(zone()); | |
+ } | |
+ ~LLVMChunkBuilder() {} | |
+ | |
+ static llvm::Type* GetLLVMType(Representation r); | |
+ | |
+  LLVMChunk* chunk() const { return static_cast<LLVMChunk*>(chunk_); } | 
+ void set_emit_degug_code(bool v) { emit_debug_code_ = v; } | |
+ bool emit_debug_code() { return emit_debug_code_; } | |
+ LLVMChunkBuilder& Build(); | |
+ // LLVM requires that each phi input's label be a basic block | |
+ // immediately preceding the given BB. | |
+ // Hydrogen does not impose such a constraint. | |
+ // For that reason our phis are not LLVM-compliant right after phi resolution. | |
+ LLVMChunkBuilder& NormalizePhis(); | |
+ LLVMChunkBuilder& GiveNamesToPointerValues(); | |
+ LLVMChunkBuilder& PlaceStatePoints(); | |
+ LLVMChunkBuilder& RewriteStatePoints(); | |
+ LLVMChunkBuilder& Optimize(); // invoke llvm transformation passes for the function | |
+ LLVMChunk* Create(); | |
+ | |
+ LLVMEnvironment* AssignEnvironment(); | |
+ LLVMEnvironment* CreateEnvironment( | |
+ HEnvironment* hydrogen_env, int* argument_index_accumulator, | |
+ ZoneList<HValue*>* objects_to_materialize); | |
+ | |
+ void DeoptimizeIf(llvm::Value* compare, | |
+ Deoptimizer::DeoptReason deopt_reason, | |
+ bool negate = false, | |
+ llvm::BasicBlock* next_block = nullptr); | |
+ | |
+ void DoNumberTagU(HChange* instr); | |
+ // Declare methods that deal with the individual node types. | |
+#define DECLARE_DO(type) void Do##type(H##type* node); | |
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) | |
+#undef DECLARE_DO | |
+ static const uintptr_t kExtFillingValue = 0xabcdbeef; | |
+ static const char* kGcStrategyName; | |
+ static const std::string kPointersPrefix; | |
+ | |
+ private: | |
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize; | |
+ static const int kMaxCallSequenceLen = 16; // FIXME(llvm): find out max size. | |
+ | |
+ static llvm::CmpInst::Predicate TokenToPredicate(Token::Value op, | |
+ bool is_unsigned, | |
+ bool is_double = false); | |
+ static bool HasTaggedValue(HValue* value); | |
+ | |
+ void GetAllEnvironmentValues(LLVMEnvironment* environment, | |
+ std::vector<llvm::Value*>& mapped_values); | |
+ void CreateSafepointPollFunction(); | |
+ void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block); | |
+ void VisitInstruction(HInstruction* current); | |
+ void PatchReceiverToGlobalProxy(); | |
+ llvm::Value* GetParameter(int index); | |
+ void DoPhi(HPhi* phi); | |
+ void ResolvePhis(); | |
+ void ResolvePhis(HBasicBlock* block); | |
+ void CreateVolatileZero(); | |
+ llvm::Value* GetVolatileZero(); | |
+ llvm::Value* BuildFastArrayOperand(HValue*, llvm::Value*, | |
+ ElementsKind, uint32_t); | |
+ llvm::Value* ConstFoldBarrier(llvm::Value* imm); | |
+ llvm::BasicBlock* NewBlock(const std::string& name, | |
+ llvm::Function* = nullptr); | |
+ // if the llvm counterpart of the block does not exist, create it | |
+ llvm::BasicBlock* Use(HBasicBlock* block); | |
+ llvm::Value* Use(HValue* value); | |
+ llvm::Value* SmiToInteger32(HValue* value); | |
+ llvm::Value* Integer32ToSmi(HValue* value); | |
+ llvm::Value* Integer32ToSmi(llvm::Value* value); | |
+ // Is the value (not) a smi? | |
+ llvm::Value* SmiCheck(llvm::Value* value, bool negate = false); | |
+ void AssertSmi(llvm::Value* value, bool assert_not_smi = false); | |
+ void AssertNotSmi(llvm::Value* value); | |
+ void Assert(llvm::Value* condition, llvm::BasicBlock* next_block = nullptr); | |
+ void InsertDebugTrap(); | |
+ void IncrementCounter(StatsCounter* counter, int value); | |
+ llvm::Value* CallVoid(Address target); | |
+ llvm::Value* CallAddressForMathPow(Address target, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& params); | |
+ // These Call functions are intended to be highly reusable. | |
+ // TODO(llvm): default parameters -- not very good. | |
+ // (Especially with different default values for different methods). | |
+ llvm::Value* CallVal(llvm::Value* callable_value, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& params, | |
+ llvm::Type* return_type = nullptr, // void return type | |
+ bool record_safepoint = true); | |
+ llvm::Value* CallCode(Handle<Code> code, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& params, | |
+ bool record_safepoint = true); | |
+ llvm::Value* CallAddress(Address target, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& params, | |
+ llvm::Type* return_type = nullptr); | |
+ void CheckEnumCache(HValue* enum_val, llvm::Value* val, llvm::BasicBlock* bb); | |
+ llvm::Value* EnumLength(llvm::Value* map_); | |
+ llvm::Value* FieldOperand(llvm::Value* base, int offset); | |
+ llvm::Value* LoadFieldOperand(llvm::Value* base, | |
+ int offset, | |
+ const char* name = ""); | |
+ llvm::Value* ValueFromSmi(Smi* smi); | |
+ llvm::Value* CreateConstant(HConstant* instr, HBasicBlock* block = NULL); | |
+ llvm::Value* ConstructAddress(llvm::Value* base, int64_t offset); | |
+ llvm::Value* MoveHeapObject(Handle<Object> obj); | |
+ llvm::Value* Move(Handle<Object> object, RelocInfo::Mode rmode); | |
+ llvm::Value* Compare(llvm::Value* lhs, llvm::Value* rhs); | |
+ llvm::Value* Compare(llvm::Value* lhs, Handle<Object> rhs); | |
+ llvm::Value* CompareMap(llvm::Value* object, Handle<Map> map); | |
+ llvm::Value* CheckPageFlag(llvm::Value* object, int mask); | |
+  // Allocate a heap number in new space with undefined value. Returns a | 
+  // tagged pointer to it; the slow path calls into the runtime when new | 
+  // space is full. | 
+ llvm::Value* AllocateHeapNumberSlow(HValue* instr = nullptr, | |
+ llvm::Value* = nullptr); | |
+ llvm::Value* AllocateHeapNumber(MutableMode mode = IMMUTABLE); | |
+ llvm::Value* Allocate(llvm::Value* object_size, | |
+ llvm::Value* (LLVMChunkBuilder::*fptr) | |
+ (HValue* instr, llvm::Value*), | |
+ AllocationFlags flag, | |
+ HValue* instr = nullptr, | |
+ llvm::Value* temp = nullptr); | |
+ llvm::Value* AllocateSlow(HValue* instr, llvm::Value* temp); | |
+ llvm::Value* RegExpLiteralSlow(HValue* instr, llvm::Value* phi); | |
+ llvm::Value* LoadAllocationTopHelper(AllocationFlags flags); | |
+ void UpdateAllocationTopHelper(llvm::Value* result_end, AllocationFlags flags); | |
+ void DirtyHack(int arg_count); | |
+ llvm::CallingConv::ID GetCallingConv(CallInterfaceDescriptor descriptor); | |
+ llvm::Value* CallRuntime(const Runtime::Function*); | |
+ llvm::Value* CallRuntimeViaId(Runtime::FunctionId id); | |
+ llvm::Value* CallRuntimeFromDeferred(Runtime::FunctionId id, llvm::Value* context, std::vector<llvm::Value*>); | |
+ llvm::Value* GetContext(); | |
+ llvm::Value* GetNan(); | |
+ llvm::Value* LoadRoot(Heap::RootListIndex index); | |
+ llvm::Value* CompareRoot(llvm::Value* val, Heap::RootListIndex index, | |
+ llvm::CmpInst::Predicate = llvm::CmpInst::ICMP_EQ); | |
+ llvm::Value* CmpObjectType(llvm::Value* heap_object, | |
+ InstanceType type, | |
+ llvm::CmpInst::Predicate = llvm::CmpInst::ICMP_EQ); | |
+ llvm::Value* RecordRelocInfo(uint64_t intptr_value, RelocInfo::Mode rmode); | |
+ void RecordWriteForMap(llvm::Value* object, llvm::Value* map); | |
+ void RecordWriteField(llvm::Value* object, | |
+ llvm::Value* key_reg, | |
+ int offset, | |
+ enum SmiCheck smi_check, | |
+ PointersToHereCheck ptr_check, | |
+ RememberedSetAction set); | |
+ void RecordWrite(llvm::Value* object, | |
+ llvm::Value* map, | |
+ llvm::Value* value, | |
+ PointersToHereCheck ptr_check, | |
+ RememberedSetAction set, | |
+ enum SmiCheck smi_check); | |
+ void ChangeTaggedToDouble(HValue* val, HChange* instr); | |
+ void ChangeDoubleToI(HValue* val, HChange* instr); | |
+ void ChangeDoubleToTagged(HValue* val, HChange* instr); | |
+ void ChangeTaggedToISlow(HValue* val, HChange* instr); | |
+ void BranchTagged(HBranch* instr, | |
+ ToBooleanStub::Types expected, | |
+ llvm::BasicBlock* true_target, | |
+ llvm::BasicBlock* false_target); | |
+ | |
+ std::vector<llvm::Value*> GetSafepointValues(HInstruction* instr); | |
+ void DoDummyUse(HInstruction* instr); | |
+ void DoStoreKeyedFixedArray(HStoreKeyed* value); | |
+ void DoLoadKeyedFixedArray(HLoadKeyed* value); | |
+ void DoLoadKeyedExternalArray(HLoadKeyed* value); | |
+ void DoStoreKeyedExternalArray(HStoreKeyed* value); | |
+ void DoLoadKeyedFixedDoubleArray(HLoadKeyed* value); | |
+ void DoStoreKeyedFixedDoubleArray(HStoreKeyed* value); | |
+ void Retry(BailoutReason reason); | |
+ void AddStabilityDependency(Handle<Map> map); | |
+ void AddDeprecationDependency(Handle<Map> map); | |
+ void CallStackMap(int stackmap_id, llvm::Value* value); | |
+ void CallStackMap(int stackmap_id, std::vector<llvm::Value*>& values); | |
+ llvm::CallInst* CallPatchPoint(int32_t stackmap_id, | |
+ llvm::Value* target_function, | |
+ std::vector<llvm::Value*>& function_args, | |
+ std::vector<llvm::Value*>& live_values, | |
+ int covering_nop_size = kMaxCallSequenceLen); | |
+ llvm::Value* CallStatePoint(int32_t stackmap_id, | |
+ llvm::Value* target_function, | |
+ llvm::CallingConv::ID calling_conv, | |
+ std::vector<llvm::Value*>& function_args, | |
+ int covering_nop_size); | |
+ void DoMathAbs(HUnaryMathOperation* instr); | |
+ void DoIntegerMathAbs(HUnaryMathOperation* instr); | |
+ void DoSmiMathAbs(HUnaryMathOperation* instr); | |
+ void DoMathPowHalf(HUnaryMathOperation* instr); | |
+ void DoMathSqrt(HUnaryMathOperation* instr); | |
+ void DoMathRound(HUnaryMathOperation* instr); | |
+ void DoModByPowerOf2I(HMod* instr); | |
+ void DoModByConstI(HMod* instr); | |
+ void DoModI(HMod* instr); | |
+ void DoMathFloor(HUnaryMathOperation* instr); | |
+ void DoMathLog(HUnaryMathOperation* instr); | |
+ void DoMathExp(HUnaryMathOperation* instr); | |
+ llvm::Value* ExternalOperand(ExternalReference offset); | |
+ int64_t RootRegisterDelta(ExternalReference offset); | |
+ void PrepareCallCFunction(int num_arguments); | |
+ int ArgumentStackSlotsForCFunctionCall(int num_arguments); | |
+ llvm::Value* CallCFunction(ExternalReference function, std::vector<llvm::Value*>, int num_arguments); | |
+ llvm::Value* LoadAddress(ExternalReference); | |
+ void DumpPointerValues(); | |
+ llvm::Value* CmpInstanceType(llvm::Value* value, InstanceType type, | |
+ llvm::CmpInst::Predicate pred = llvm::CmpInst::ICMP_EQ); | |
+ // TODO(llvm): probably pull these up to LowChunkBuilderBase | |
+ HInstruction* current_instruction_; | |
+ HBasicBlock* current_block_; | |
+ HBasicBlock* next_block_; | |
+ // module_ ownership is later passed to the execution engine (MCJIT) | |
+ std::unique_ptr<llvm::Module> module_; | |
+ // Non-owning pointer to the function inside llvm module. | |
+ // Not to be used for fetching the actual native code, | |
+ // since the corresponding methods are deprecated. | |
+ llvm::Function* function_; | |
+ llvm::Function* safepoint_poll_; | |
+ std::unique_ptr<llvm::IRBuilder<>> llvm_ir_builder_; | |
+ std::unique_ptr<LLVMDeoptData> deopt_data_; | |
+ LLVMRelocationData* reloc_data_; | |
+ ZoneList<llvm::Value*> pending_pushed_args_; | |
+ ZoneList<llvm::Value*> osr_preserved_values_; | |
+ bool emit_debug_code_; | |
+ llvm::Value* volatile_zero_address_; | |
+ llvm::Value* global_receiver_; | |
+ // TODO(llvm): choose a more appropriate data structure (maybe in the zone), | |
+ // or even some fancy lambda to pass to createAppendLivePointersToSafepoints. | |
+ std::set<llvm::Value*> pointers_; | |
+ int number_of_pointers_; | |
+ enum ScaleFactor { | |
+ times_1 = 0, | |
+ times_2 = 1, | |
+ times_4 = 2, | |
+ times_8 = 3, | |
+ times_int_size = times_4, | |
+ times_half_pointer_size = times_2, | |
+ times_pointer_size = times_4, | |
+ times_twice_pointer_size = times_8 | |
+ }; | |
+}; | |
+ | |
+} // namespace internal | |
+} // namespace v8 | |
+#endif // V8_LLVM_CHUNK_H_ | |
diff --git a/src/llvm/llvm-headers.h b/src/llvm/llvm-headers.h | |
new file mode 100644 | |
index 0000000..cdb70c8 | |
--- /dev/null | |
+++ b/src/llvm/llvm-headers.h | |
@@ -0,0 +1,91 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#ifndef V8_LLVM_HEADERS_H_ | |
+#define V8_LLVM_HEADERS_H_ | |
+ | |
+#if DEBUG | |
+#define LLV8_HAD_DEBUG | |
+#undef DEBUG | |
+#endif | |
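+// Editor's note (inferred): V8 debug builds define DEBUG as a plain macro, | |
+// while LLVM's Support/Debug.h defines a function-like DEBUG(X) macro. The | |
+// two clash, so V8's DEBUG is saved here and restored after the includes. | |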
+ | |
+// FIXME(llvm): remove unneeded headers | |
+// FIXME(llvm): sort headers (style) | |
+#include <iostream> | |
+ | |
+#include "llvm/IR/IRBuilder.h" | |
+#include "llvm/Analysis/Passes.h" | |
+#include "llvm/Analysis/TargetTransformInfo.h" | |
+#include "llvm/ADT/DenseMap.h" | |
+#include "llvm/ADT/DenseSet.h" | |
+#include "llvm/ADT/STLExtras.h" | |
+#include "llvm/ADT/SetOperations.h" | |
+#include "llvm/CodeGen/GCStrategy.h" | |
+#include "llvm/IR/LLVMContext.h" | |
+#include "llvm/IR/Module.h" | |
+#include "llvm/ExecutionEngine/GenericValue.h" | |
+#include "llvm/ExecutionEngine/ExecutionEngine.h" | |
+#include "llvm/IR/Constants.h" | |
+#include "llvm/CodeGen/LinkAllCodegenComponents.h" | |
+#include "llvm/IR/DerivedTypes.h" | |
+#include "llvm/IR/Dominators.h" | |
+#include "llvm/IR/InstrTypes.h" | |
+#include "llvm/IR/Instructions.h" | |
+#include "llvm/IR/InstIterator.h" | |
+#include "llvm/IR/LLVMContext.h" | |
+#include "llvm/IR/MDBuilder.h" | |
+#include "llvm/IR/PassManager.h" | |
+#include "llvm/IR/Statepoint.h" | |
+#include "llvm/IR/CallingConv.h" | |
+#include "llvm/IR/Verifier.h" | |
+#include "llvm/IR/ValueSymbolTable.h" | |
+#include "llvm/MC/MCAsmInfo.h" | |
+#include "llvm/MC/MCContext.h" | |
+#include "llvm/MC/MCDisassembler.h" | |
+#include "llvm/MC/MCInst.h" | |
+#include "llvm/MC/MCInstrAnalysis.h" | |
+#include "llvm/MC/MCInstrInfo.h" | |
+#include "llvm/MC/MCInstPrinter.h" | |
+#include "llvm/MC/MCObjectFileInfo.h" | |
+#include "llvm/MC/MCRegisterInfo.h" | |
+#include "llvm/MC/MCSubtargetInfo.h" | |
+#include "llvm/Support/ManagedStatic.h" | |
+#include "llvm/Support/TargetRegistry.h" | |
+#include "llvm/Support/TargetSelect.h" | |
+#include "llvm/Support/raw_ostream.h" | |
+#include "llvm/Support/Casting.h" | |
+#include "llvm/Support/CodeGen.h" | |
+#include "llvm/Transforms/Utils/BasicBlockUtils.h" | |
+#include "llvm/Transforms/Utils/Local.h" | |
+#include "llvm/Transforms/Utils/PromoteMemToReg.h" | |
+#include "llvm/ExecutionEngine/MCJIT.h" | |
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h" | |
+ | |
+#include "llvm/Analysis/Passes.h" | |
+#include "llvm/Analysis/TargetTransformInfo.h" | |
+#include "llvm/Transforms/IPO.h" | |
+#include "llvm/Transforms/IPO/PassManagerBuilder.h" | |
+#include "llvm/Transforms/Scalar.h" | |
+ | |
+#include "llvm/IR/Function.h" | |
+ | |
+#include "llvm/IR/LegacyPassManager.h" | |
+#include "llvm/PassSupport.h" | |
+ | |
+#undef DEBUG // Undef the llvm DEBUG | |
+ | |
+#ifdef LLV8_HAD_DEBUG | |
+#define DEBUG 1 | |
+#endif | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+using llvm::LLVMContext; | |
+//using llvm::Module; | |
+ | |
+} // namespace internal | |
+} // namespace v8 | |
+ | |
+#endif // V8_LLVM_HEADERS_H_ | |
diff --git a/src/llvm/llvm-stackmaps.cc b/src/llvm/llvm-stackmaps.cc | |
new file mode 100644 | |
index 0000000..929a944 | |
--- /dev/null | |
+++ b/src/llvm/llvm-stackmaps.cc | |
@@ -0,0 +1,329 @@ | |
+// Copyright (C) 2013, 2014 Apple Inc. All rights reserved. | |
+// | |
+// Redistribution and use in source and binary forms, with or without | |
+// modification, are permitted provided that the following conditions | |
+// are met: | |
+// 1. Redistributions of source code must retain the above copyright | |
+// notice, this list of conditions and the following disclaimer. | |
+// 2. Redistributions in binary form must reproduce the above copyright | |
+// notice, this list of conditions and the following disclaimer in the | |
+// documentation and/or other materials provided with the distribution. | |
+// | |
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
+ | |
+// This code has been ported from JavaScriptCore (see FTLStackMaps.cpp). | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+ | |
+#include "llvm-stackmaps.h" | |
+ | |
+#include <algorithm> | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+template<typename T> | |
+T readObject(StackMaps::ParseContext& context) { | |
+ T result; | |
+ result.parse(context); | |
+ return result; | |
+} | |
+ | |
+StackMapReg DWARFRegister::reg() const { | |
+ /*if (dwarf_reg_num_ < 0 || | |
+ dwarf_reg_num_ >= Register::kNumRegisters) { | |
+ UNIMPLEMENTED(); | |
+ //return Register::from_code(15); | |
+ } | |
+ int const map[] = { 0, 2, 1, 3, 6, 7, 5, 4, 8, 9, 10, 11, 12, 13, 14, 15 }; | |
+ return Register::from_code(map[dwarf_reg_num_]);*/ | |
+ return StackMapReg::FromIndex(dwarf_reg_num_); | |
+} | |
+ | |
+void DWARFRegister::dump(std::ostream& os) const { | |
+ os << this->reg().ToString(); | |
+} | |
+ | |
+void StackMaps::Constant::parse(StackMaps::ParseContext& context) { | |
+ integer = context.view->read<int64_t>(true); | |
+} | |
+ | |
+void StackMaps::Constant::dump(std::ostream& os) const { | |
+ os << static_cast<unsigned long long>(integer); | |
+} | |
+ | |
+void StackMaps::StackSize::parse(StackMaps::ParseContext& context) { | |
+ switch (context.version) { | |
+ case 0: | |
+ functionOffset = context.view->read<uint32_t>(true); | |
+ size = context.view->read<uint32_t>(true); | |
+ break; | |
+ | |
+ default: | |
+ functionOffset = context.view->read<uint64_t>(true); | |
+ size = context.view->read<uint64_t>(true); | |
+ break; | |
+ } | |
+} | |
+ | |
+void StackMaps::StackSize::dump(std::ostream& os) const { | |
+ os << "(off:" << functionOffset << ", size:" << size << ")"; | |
+} | |
+ | |
+void StackMaps::Location::parse(StackMaps::ParseContext& context) { | |
+ kind = static_cast<Kind>(context.view->read<uint8_t>(true)); | |
+ size = context.view->read<uint8_t>(true); | |
+ dwarf_reg = DWARFRegister(context.view->read<uint16_t>(true)); | |
+ this->offset = context.view->read<int32_t>(true); | |
+} | |
+ | |
+const char* StackMaps::Location::ToString( | |
+ StackMaps::Location::Kind kind) { | |
+ switch (kind) { | |
+ case StackMaps::Location::kRegister: | |
+ return "Register"; | |
+ case StackMaps::Location::kDirect: | |
+ return "Direct"; | |
+ case StackMaps::Location::kIndirect: | |
+ return "Indirect"; | |
+ case StackMaps::Location::kConstant: | |
+ return "Constant"; | |
+ case StackMaps::Location::kConstantIndex: | |
+ return "ConstantIndex"; | |
+ default: | |
+ UNREACHABLE(); | |
+ return nullptr; | |
+ } | |
+} | |
+ | |
+void StackMaps::Location::dump(std::ostream& os) const { | |
+ os << "(" << ToString(kind) << ", " | |
+ << dwarf_reg << ", off:" | |
+ << offset << ", size:" | |
+ << static_cast<unsigned int>(size) << ")"; | |
+} | |
+ | |
+//GPRReg StackMaps::Location::directGPR() const { | |
+// return FTL::Location::forStackmaps(nullptr, *this).directGPR(); | |
+//} | |
+// | |
+//void StackMaps::Location::restoreInto( | |
+// MacroAssembler& jit, StackMaps& stackmaps, char* savedRegisters, GPRReg result) const { | |
+// FTL::Location::forStackmaps(&stackmaps, *this).restoreInto(jit, savedRegisters, result); | |
+//} | |
+ | |
+void StackMaps::LiveOut::parse(StackMaps::ParseContext& context) { | |
+ dwarfReg = DWARFRegister(context.view->read<uint16_t>(true)); // regnum | |
+ context.view->read<uint8_t>(true); // reserved | |
+ size = context.view->read<uint8_t>(true); // size in bytes | |
+} | |
+ | |
+void StackMaps::LiveOut::dump(std::ostream& os) const { | |
+ os << "(" << dwarfReg << ", " << size << ")"; | |
+} | |
+ | |
+bool StackMaps::Record::parse(StackMaps::ParseContext& context) { | |
+ int64_t id = context.view->read<int64_t>(true); | |
+ DCHECK(static_cast<int32_t>(id) == id); | |
+ patchpointID = static_cast<uint32_t>(id); | |
+ if (static_cast<int32_t>(patchpointID) < 0) | |
+ return false; | |
+ | |
+ instructionOffset = context.view->read<uint32_t>(true); | |
+ flags = context.view->read<uint16_t>(true); | |
+ | |
+ unsigned length = context.view->read<uint16_t>(true); | |
+ while (length--) | |
+ locations.push_back(readObject<Location>(context)); | |
+ | |
+ if (context.version >= 1) | |
+ context.view->read<uint16_t>(true); // padding | |
+ | |
+ unsigned numLiveOuts = context.view->read<uint16_t>(true); | |
+ while (numLiveOuts--) | |
+ live_outs.push_back(readObject<LiveOut>(context)); | |
+ | |
+ if (context.version >= 1) { | |
+ if (context.view->offset() & 7) { | |
+ DCHECK(!(context.view->offset() & 3)); | |
+ context.view->read<uint32_t>(true); // padding | |
+ } | |
+ } | |
+ | |
+ return true; | |
+} | |
+ | |
+void StackMaps::Record::dump(std::ostream& os) const { | |
+ os << "(#" << patchpointID << ", offset = " | |
+ << instructionOffset << ", flags = " | |
+ << flags << ", locations = " | |
+ << "[" ; | |
+ std::for_each(locations.begin(), locations.end(), | |
+ [&os](const Location &n){ os << n << ", "; }); | |
+ os << "] live_outs = ["; | |
+ std::for_each(live_outs.begin(), live_outs.end(), | |
+ [&os](const LiveOut &n){ os << n << ", "; }); | |
+ os << "])"; | |
+} | |
+// | |
+//RegisterSet StackMaps::Record::locationSet() const { | |
+// RegisterSet result; | |
+// for (unsigned i = locations.size(); i--;) { | |
+// Register reg = locations[i].dwarfReg.reg(); | |
+// if (!reg) continue; // FIXME(llvm): what does it mean now? | |
+// result.set(reg); | |
+// } | |
+// return result; | |
+//} | |
+// | |
+//RegisterSet StackMaps::Record::liveOutsSet() const { | |
+// RegisterSet result; | |
+// for (unsigned i = live_outs.size(); i--;) { | |
+// LiveOut liveOut = live_outs[i]; | |
+// Reg reg = liveOut.dwarfReg.reg(); | |
+// // FIXME: Either assert that size is not greater than sizeof(pointer), or actually | |
+// // save the high bits of registers. | |
+// // https://bugs.webkit.org/show_bug.cgi?id=130885 | |
+// if (!reg) { | |
+// UNREACHABLE(); | |
+// } | |
+// result.set(reg); | |
+// } | |
+// return result; | |
+//} | |
+// | |
+//RegisterSet StackMaps::Record::usedRegisterSet() const { | |
+// RegisterSet result; | |
+// result.merge(locationSet()); | |
+// result.merge(liveOutsSet()); | |
+// return result; | |
+//} | |
+ | |
+bool StackMaps::parse(DataView* view) { | |
+ ParseContext context; | |
+ context.view = view; | |
+ | |
+ version = context.version = context.view->read<uint8_t>(true); | |
+ | |
+ context.view->read<uint8_t>(true); // Reserved | |
+ context.view->read<uint8_t>(true); // Reserved | |
+ context.view->read<uint8_t>(true); // Reserved | |
+ | |
+ uint32_t numFunctions; | |
+ uint32_t numConstants; | |
+ uint32_t numRecords; | |
+ | |
+ numFunctions = context.view->read<uint32_t>(true); | |
+ if (context.version >= 1) { | |
+ numConstants = context.view->read<uint32_t>(true); | |
+ numRecords = context.view->read<uint32_t>(true); | |
+ } | |
+ while (numFunctions--) | |
+ stack_sizes.push_back(readObject<StackSize>(context)); | |
+ | |
+ if (!context.version) | |
+ numConstants = context.view->read<uint32_t>(true); | |
+ while (numConstants--) | |
+ constants.push_back(readObject<Constant>(context)); | |
+ | |
+ if (!context.version) | |
+ numRecords = context.view->read<uint32_t>(true); | |
+ while (numRecords--) { | |
+ Record record; | |
+ if (!record.parse(context)) | |
+ return false; | |
+ records.push_back(record); | |
+ } | |
+ | |
+ return true; | |
+} | |
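+ | |
+// Editor's note: for reference, the (version >= 1) layout parsed above, as | |
+// described in LLVM's stack map documentation: | |
+//   uint8 version; uint8[3] reserved; | |
+//   uint32 numFunctions; uint32 numConstants; uint32 numRecords; | |
+//   StackSize[numFunctions]; Constant[numConstants]; Record[numRecords]; | |
+// Version 0 instead interleaves the counts with their arrays, which is why | |
+// numConstants and numRecords are read late on that path. | |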
+ | |
+void StackMaps::dump(std::ostream& os) const { | |
+ os << "Version:" << version << ", StackSizes["; | |
+ std::for_each(stack_sizes.begin(), stack_sizes.end(), | |
+ [&os](const StackSize &n){ os << n << ", "; }); | |
+ os << "], Constants:["; | |
+ std::for_each(constants.begin(), constants.end(), | |
+ [&os](const Constant &n){ os << n << ", "; }); | |
+ os << "], Records:["; | |
+ std::for_each(records.begin(), records.end(), | |
+ [&os](const Record &n){ os << n << ", "; }); | |
+ os << "]"; | |
+} | |
+ | |
+void StackMaps::dumpMultiline(std::ostream& os, const char* prefix) const { | |
+ os << prefix << "Version: " << version << "\n"; | |
+ os << prefix << "StackSizes:\n"; | |
+ for (unsigned i = 0; i < stack_sizes.size(); ++i) | |
+ os << prefix << " " << stack_sizes[i] << "\n"; | |
+ os << prefix << "Constants:\n"; | |
+ for (unsigned i = 0; i < constants.size(); ++i) | |
+ os << prefix << " " << constants[i] << "\n"; | |
+ os << prefix << "Records:\n"; | |
+ for (unsigned i = 0; i < records.size(); ++i) | |
+ os << prefix << " " << records[i] << "\n"; | |
+} | |
+ | |
+StackMaps::RecordMap StackMaps::computeRecordMap() const { | |
+ // FIXME(llvm): it seems best to cache the result (but see the call sites). | |
+ RecordMap result; | |
+ for (auto i = records.size(); i--;) | |
+ result[records[i].patchpointID] = records[i]; // Careful (life-time)! | |
+ return result; | |
+} | |
+ | |
+uint64_t StackMaps::stackSize() const { | |
+ // There must always be at least one stack map section, and we do not | |
+ // support multiple functions per module at the moment. | |
+ CHECK(stack_sizes.size() == 1); | |
+ return stack_sizes[0].size; | |
+} | |
+ | |
+} } // namespace v8::internal | |
+ | |
+//namespace WTF { | |
+// | |
+//using namespace JSC::FTL; | |
+// | |
+//void printInternal(PrintStream& out, StackMaps::Location::Kind kind) { | |
+// Style! | |
+// switch (kind) { | |
+// case StackMaps::Location::Unprocessed: | |
+// out.print("Unprocessed"); | |
+// return; | |
+// case StackMaps::Location::Register: | |
+// out.print("Register"); | |
+// return; | |
+// case StackMaps::Location::Direct: | |
+// out.print("Direct"); | |
+// return; | |
+// case StackMaps::Location::Indirect: | |
+// out.print("Indirect"); | |
+// return; | |
+// case StackMaps::Location::Constant: | |
+// out.print("Constant"); | |
+// return; | |
+// case StackMaps::Location::ConstantIndex: | |
+// out.print("ConstantIndex"); | |
+// return; | |
+// } | |
+// dataLog("Unrecognized kind: ", static_cast<int>(kind), "\n"); | |
+// RELEASE_ASSERT_NOT_REACHED(); | |
+//} | |
+// | |
+//} // namespace WTF | |
+// | |
diff --git a/src/llvm/llvm-stackmaps.h b/src/llvm/llvm-stackmaps.h | |
new file mode 100644 | |
index 0000000..23a84e9 | |
--- /dev/null | |
+++ b/src/llvm/llvm-stackmaps.h | |
@@ -0,0 +1,189 @@ | |
+// Copyright (C) 2013, 2014 Apple Inc. All rights reserved. | |
+// | |
+// Redistribution and use in source and binary forms, with or without | |
+// modification, are permitted provided that the following conditions | |
+// are met: | |
+// 1. Redistributions of source code must retain the above copyright | |
+// notice, this list of conditions and the following disclaimer. | |
+// 2. Redistributions in binary form must reproduce the above copyright | |
+// notice, this list of conditions and the following disclaimer in the | |
+// documentation and/or other materials provided with the distribution. | |
+// | |
+// THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
+// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
+ | |
+// This code has been ported from JavaScriptCore (see FTLStackMaps.h). | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+ | |
+#ifndef V8_LLVM_STACKMAPS_H_ | |
+#define V8_LLVM_STACKMAPS_H_ | |
+ | |
+#include "llvm-headers.h" | |
+//#include "src/list-inl.h" | |
+#include "src/x64/assembler-x64-inl.h" // For now | |
+#include "src/llvm/reg.h" | |
+#include <map> | |
+#include <vector> | |
+ | |
+#define OVERLOAD_STREAM_INSERTION(type) \ | |
+ friend std::ostream& operator<<(std::ostream& os, const type& rhs) { \ | |
+ rhs.dump(os); \ | |
+ return os; \ | |
+ } | |
+ | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+class DataView { | |
+ public: | |
+ DataView(byte* array) | |
+ : array_(array), | |
+ offset_(0) {} | |
+ | |
+ template<typename T> | |
+ T read(bool littleEndian) { | |
+ // TODO(llvm): this assumes a little-endian host (broken for big-endian). | |
+ USE(littleEndian); | |
+ T result = *reinterpret_cast<T*>(array_ + offset_); | |
+ offset_ += sizeof(T); | |
+ return result; | |
+ } | |
+ | |
+ int offset() { return offset_; } | |
+ private: | |
+ byte* array_; | |
+ int offset_; | |
+}; | |
+ | |
+class DWARFRegister { | |
+ public: | |
+ DWARFRegister() | |
+ : dwarf_reg_num_(-1) {} | |
+ | |
+ explicit DWARFRegister(int16_t dwarf_reg_num) | |
+ : dwarf_reg_num_(dwarf_reg_num) {} | |
+ | |
+ int16_t dwarf_reg_num() const { return dwarf_reg_num_; } | |
+ | |
+ StackMapReg reg() const; | |
+ | |
+ // TODO(llvm): method names should start with a capital (style) | |
+ void dump(std::ostream&) const; | |
+ | |
+ OVERLOAD_STREAM_INSERTION(DWARFRegister) | |
+ | |
+ private: | |
+ int16_t dwarf_reg_num_; | |
+}; | |
+ | |
+struct StackMaps { | |
+ struct ParseContext { | |
+ unsigned version; | |
+ DataView* view; | |
+ }; | |
+ | |
+ struct Constant { | |
+ int64_t integer; | |
+ | |
+ void parse(ParseContext&); | |
+ void dump(std::ostream&) const; | |
+ | |
+ OVERLOAD_STREAM_INSERTION(Constant) | |
+ }; | |
+ | |
+ struct StackSize { | |
+ uint64_t functionOffset; | |
+ uint64_t size; | |
+ | |
+ void parse(ParseContext&); | |
+ void dump(std::ostream&) const; | |
+ | |
+ OVERLOAD_STREAM_INSERTION(StackSize) | |
+ }; | |
+ | |
+ struct Location { | |
+ enum Kind : int8_t { | |
+ kUnprocessed, | |
+ kRegister = 0x1, | |
+ kDirect, | |
+ kIndirect, | |
+ kConstant, | |
+ kConstantIndex | |
+ }; | |
+ | |
+ DWARFRegister dwarf_reg; | |
+ uint8_t size; | |
+ Kind kind; | |
+ int32_t offset; | |
+ | |
+ void parse(ParseContext&); | |
+ void dump(std::ostream&) const; | |
+ | |
+ static const char* ToString(Kind kind); | |
+ | |
+ OVERLOAD_STREAM_INSERTION(Location) | |
+// GPRReg directGPR() const; | |
+// void restoreInto(MacroAssembler&, StackMaps&, char* savedRegisters, GPRReg result) const; | |
+ }; | |
+ | |
+ // TODO(llvm): https://bugs.webkit.org/show_bug.cgi?id=130802 | |
+ struct LiveOut { | |
+ DWARFRegister dwarfReg; | |
+ uint8_t size; | |
+ | |
+ void parse(ParseContext&); | |
+ void dump(std::ostream&) const; | |
+ | |
+ OVERLOAD_STREAM_INSERTION(LiveOut) | |
+ }; | |
+ | |
+ struct Record { | |
+ uint32_t patchpointID; | |
+ uint32_t instructionOffset; | |
+ uint16_t flags; | |
+ | |
+ std::vector<Location> locations; | |
+ std::vector<LiveOut> live_outs; | |
+ | |
+ bool parse(ParseContext&); | |
+ void dump(std::ostream&) const; | |
+ | |
+ OVERLOAD_STREAM_INSERTION(Record) | |
+// TODO(llvm): use RegList. | |
+// RegisterSet liveOutsSet() const; | |
+// RegisterSet locationSet() const; | |
+// RegisterSet usedRegisterSet() const; | |
+ }; | |
+ | |
+ unsigned version; | |
+ std::vector<StackSize> stack_sizes; | |
+ std::vector<Constant> constants; | |
+ std::vector<Record> records; | |
+ | |
+ // Returns true on parse success, false on failure. | |
+ // Failure means that LLVM is signaling compile failure to us. | |
+ bool parse(DataView*); | |
+ void dump(std::ostream&) const; | |
+ void dumpMultiline(std::ostream&, const char* prefix) const; | |
+ | |
+ using RecordMap = std::map<uint32_t, Record>; // PatchPoint ID -> Record | |
+ | |
+ RecordMap computeRecordMap() const; | |
+ | |
+ uint64_t stackSize() const; | |
+}; | |
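+ | |
+// Editor's sketch (not part of the original patch): how the pieces above | |
+// fit together once the raw bytes of a .llvm_stackmaps section are in | |
+// hand. `section` is an assumed input (see mcjit-memory-manager below). | |
+inline void DumpPatchpoints(byte* section) { | |
+  DataView view(section); | |
+  StackMaps stackmaps; | |
+  if (!stackmaps.parse(&view)) return;  // LLVM signaled compile failure. | |
+  stackmaps.dumpMultiline(std::cerr, "  "); | |
+  // PatchPoint ID -> Record. Records are copied into the map, so it | |
+  // remains valid independently of `stackmaps`. | |
+  StackMaps::RecordMap map = stackmaps.computeRecordMap(); | |
+  for (auto& pair : map) { | |
+    std::cerr << "patchpoint #" << pair.first << " at code offset " | |
+              << pair.second.instructionOffset << "\n"; | |
+  } | |
+} | |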
+ | |
+ | |
+} } // namespace v8::internal | |
+ | |
+#endif // V8_LLVM_STACKMAPS_H_ | |
diff --git a/src/llvm/mcjit-memory-manager.cc b/src/llvm/mcjit-memory-manager.cc | |
new file mode 100644 | |
index 0000000..425e2d1 | |
--- /dev/null | |
+++ b/src/llvm/mcjit-memory-manager.cc | |
@@ -0,0 +1,104 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#include "mcjit-memory-manager.h" | |
+#include "src/allocation.h" | |
+#include "src/base/logging.h" | |
+#include "src/base/platform/platform.h" | |
+// FIXME(llvm): we only need IntHelper from there. Move it to a separate file. | |
+#include "llvm-chunk.h" | |
+ | |
+#include <cstdbool> | |
+#include <cstdint> | |
+#include <string> | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+std::unique_ptr<MCJITMemoryManager> MCJITMemoryManager::Create() { | |
+ return llvm::make_unique<MCJITMemoryManager>(); | |
+} | |
+ | |
+MCJITMemoryManager::MCJITMemoryManager() | |
+ : allocated_code_(1), | |
+ allocated_data_(1), | |
+ stackmaps_(1) {} | |
+ | |
+MCJITMemoryManager::~MCJITMemoryManager() { | |
+ for (auto it = allocated_code_.begin(); it != allocated_code_.end(); ++it) { | |
+ DeleteArray(it->buffer); | |
+ } | |
+ for (auto it = allocated_data_.begin(); it != allocated_data_.end(); ++it) { | |
+ DeleteArray(*it); | |
+ } | |
+} | |
+ | |
+void MCJITMemoryManager::notifyObjectLoaded(llvm::ExecutionEngine* engine, | |
+ const llvm::object::ObjectFile &) { | |
+// UNIMPLEMENTED(); | |
+} | |
+ | |
+byte* MCJITMemoryManager::allocateCodeSection(uintptr_t size, | |
+ unsigned alignment, | |
+ unsigned section_id, | |
+ llvm::StringRef section_name) { | |
+#ifdef DEBUG | |
+ std::cerr << __FUNCTION__ << " section_name == " | |
+ << section_name.str() << " section id == " | |
+ << section_id << std::endl; | |
+#endif | |
+ // Note: we don't care about the executable attribute here, because the | |
+ // code is copied elsewhere before it is executed. | |
+ byte* buffer = NewArray<byte>(size); | |
+ CHECK(alignment == 0 || | |
+ (reinterpret_cast<uintptr_t>(buffer) & | |
+ static_cast<uintptr_t>(alignment - 1)) == 0); | |
+ CodeDesc desc; | |
+ desc.buffer = buffer; | |
+ desc.buffer_size = IntHelper::AsInt(size); | |
+ desc.instr_size = IntHelper::AsInt(size); | |
+ desc.reloc_size = 0; | |
+ desc.origin = nullptr; | |
+ allocated_code_.Add(desc); | |
+ return buffer; | |
+} | |
+ | |
+byte* MCJITMemoryManager::allocateDataSection(uintptr_t size, | |
+ unsigned alignment, | |
+ unsigned section_id, | |
+ llvm::StringRef section_name, | |
+ bool is_readonly) { | |
+#ifdef DEBUG | |
+ std::cerr << __FUNCTION__ << " section_name == " | |
+ << section_name.str() << " section id == " | |
+ << section_id << " size == " | |
+ << size << std::endl; | |
+#endif | |
+ CHECK(alignment <= base::OS::AllocateAlignment()); | |
+ // TODO(llvm): handle is_readonly | |
+ if (section_name.equals(".got")) { | |
+ UNIMPLEMENTED(); // TODO(llvm): call allocateCodeSection once we | |
+ // understand what is happening here. | |
+ } | |
+ // FIXME(llvm): who is supposed to free this memory? The destructor | |
+ // currently does, but it is not clear that it should. | |
+ | |
+ // FIXME(llvm): this misinterprets the alignment parameter; | |
+ // see allocateCodeSection. | |
+ byte* buffer = NewArray<byte>(RoundUp(size, alignment)); | |
+ allocated_data_.Add(buffer); | |
+ if (section_name.equals(".llvm_stackmaps")) | |
+ stackmaps_.Add(buffer); | |
+#ifdef DEBUG | |
+ std::cerr << reinterpret_cast<void*>(buffer) << std::endl; | |
+#endif | |
+ | |
+ return buffer; | |
+} | |
+ | |
+bool MCJITMemoryManager::finalizeMemory(std::string *ErrMsg) { | |
+ return false; | |
+} | |
+ | |
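+// Editor's sketch (assumes the LLVM 3.6+ EngineBuilder API; `module` would | |
+// come from the chunk builder). MCJIT takes the manager by unique_ptr, so | |
+// keep a raw pointer first if the sections must be inspected afterwards. | |
+static llvm::ExecutionEngine* CreateEngineForTesting( | |
+    std::unique_ptr<llvm::Module> module, MCJITMemoryManager** manager_out) { | |
+  std::unique_ptr<MCJITMemoryManager> manager = MCJITMemoryManager::Create(); | |
+  *manager_out = manager.get();  // Non-owning view for later inspection. | |
+  llvm::ExecutionEngine* engine = llvm::EngineBuilder(std::move(module)) | |
+      .setMCJITMemoryManager(std::move(manager)) | |
+      .create(); | |
+  engine->finalizeObject();  // Triggers allocate*Section + finalizeMemory. | |
+  return engine; | |
+} | |
+ | |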
+} } // namespace v8::internal | |
diff --git a/src/llvm/mcjit-memory-manager.h b/src/llvm/mcjit-memory-manager.h | |
new file mode 100644 | |
index 0000000..5bea88b | |
--- /dev/null | |
+++ b/src/llvm/mcjit-memory-manager.h | |
@@ -0,0 +1,76 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#ifndef V8_MCJIT_MEMORY_MANAGER_H_ | |
+#define V8_MCJIT_MEMORY_MANAGER_H_ | |
+ | |
+#include "llvm-headers.h" | |
+ | |
+#include "src/globals.h" | |
+#include "src/list-inl.h" | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+class MCJITMemoryManager : public llvm::RTDyldMemoryManager { | |
+ public: | |
+ static std::unique_ptr<MCJITMemoryManager> Create(); | |
+ | |
+ MCJITMemoryManager(); | |
+ virtual ~MCJITMemoryManager(); | |
+ | |
+ // Allocate a memory block of (at least) the given size suitable for | |
+ // executable code. The section_id is a unique identifier assigned by the | |
+ // MCJIT engine, and optionally recorded by the memory manager to access a | |
+ // loaded section. | |
+ byte* allocateCodeSection(uintptr_t size, unsigned alignment, | |
+ unsigned section_id, | |
+ llvm::StringRef section_name) override; | |
+ | |
+ // Allocate a memory block of (at least) the given size suitable for data. | |
+ // The SectionID is a unique identifier assigned by the JIT engine, and | |
+ // optionally recorded by the memory manager to access a loaded section. | |
+ byte* allocateDataSection(uintptr_t size, unsigned alignment, | |
+ unsigned section_id, llvm::StringRef section_name, | |
+ bool is_readonly) override; | |
+ | |
+ // This method is called after an object has been loaded into memory but | |
+ // before relocations are applied to the loaded sections. The object load | |
+ // may have been initiated by MCJIT to resolve an external symbol for another | |
+ // object that is being finalized. In that case, the object about which | |
+ // the memory manager is being notified will be finalized immediately after | |
+ // the memory manager returns from this call. | |
+ // | |
+ // Memory managers which are preparing code for execution in an external | |
+ // address space can use this call to remap the section addresses for the | |
+ // newly loaded object. | |
+ void notifyObjectLoaded(llvm::ExecutionEngine* engine, | |
+ const llvm::object::ObjectFile &) override; | |
+ | |
+ // This method is called when object loading is complete and section page | |
+ // permissions can be applied. It is up to the memory manager implementation | |
+ // to decide whether or not to act on this method. The memory manager will | |
+ // typically allocate all sections as read-write and then apply specific | |
+ // permissions when this method is called. Code sections cannot be executed | |
+ // until this function has been called. In addition, any cache coherency | |
+ // operations needed to reliably use the memory are also performed. | |
+ // | |
+ // Returns true if an error occurred, false otherwise. | |
+ bool finalizeMemory(std::string *ErrMsg) override; | |
+ | |
+ CodeDesc& LastAllocatedCode() { return allocated_code_.last(); } | |
+ | |
+ List<byte*>& stackmaps() { return stackmaps_; } | |
+ | |
+ void DropStackmaps() { stackmaps_.Free(); } | |
+ private: | |
+ // TODO(llvm): is it OK to allocate those in the zone? | |
+ List<CodeDesc> allocated_code_; | |
+ List<byte*> allocated_data_; | |
+ List<byte*> stackmaps_; | |
+// Zone* zone_; | |
+}; | |
+ | |
+} } // namespace v8::internal | |
+#endif // V8_MCJIT_MEMORY_MANAGER_H_ | |
diff --git a/src/llvm/pass-normalize-phis.cc b/src/llvm/pass-normalize-phis.cc | |
new file mode 100644 | |
index 0000000..d3b1f04 | |
--- /dev/null | |
+++ b/src/llvm/pass-normalize-phis.cc | |
@@ -0,0 +1,116 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#include "pass-normalize-phis.h" | |
+ | |
+#include "src/base/macros.h" | |
+#include <set> | |
+//#include "src/globals.h" | |
+//#include "src/list-inl.h" | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+// FunctionPasses may override three virtual methods to do their work. | |
+// Each of these methods should return true if it modified the program, | |
+// or false if it didn't. | |
+class NormalizePhisPass : public llvm::FunctionPass { | |
+ public: | |
+ NormalizePhisPass(); | |
+ bool runOnFunction(llvm::Function& function) override; | |
+ void getAnalysisUsage(llvm::AnalysisUsage& analysis_usage) const override; | |
+ | |
+// bool doInitialization(Module& module) override { return false; }; | |
+ static char ID; | |
+}; | |
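+ | |
+// Editor's note: "normalize" here means making every phi input name an | |
+// actual predecessor of the phi's block. Translation from Hydrogen can | |
+// leave a phi naming a former predecessor that edge splitting has since | |
+// replaced; runOnFunction rewrites each such stale input to the unique | |
+// surviving predecessor that the stale block dominates. | |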
+ | |
+char NormalizePhisPass::ID = 0; | |
+ | |
+NormalizePhisPass::NormalizePhisPass() : FunctionPass(ID) {} | |
+ | |
+bool NormalizePhisPass::runOnFunction(llvm::Function& function) { | |
+ auto debug = false; | |
+#ifdef DEBUG | |
+ debug = true; | |
+#endif | |
+ auto changed = false; | |
+ llvm::DominatorTree& dom_tree = getAnalysis<llvm::DominatorTreeWrapperPass>() | |
+ .getDomTree(); | |
+ if (debug) dom_tree.verifyDomTree(); | |
+ | |
+ // for each BB in the function | |
+ for (auto bb = function.begin(); bb != function.end(); ++bb) { | |
+ if (debug) std::cerr << "Grabbed a new BB\n"; | |
+ llvm::PHINode* phi; | |
+ // for all phi nodes in the block | |
+ for (auto it = bb->begin(); (phi = llvm::dyn_cast<llvm::PHINode>(it)); | |
+ ++it) { | |
+ if (debug) std::cerr << "Grabbed a new Phi\n"; | |
+ // FIXME(llvm): v8 doesn't like STL much | |
+ std::set<llvm::BasicBlock*> preds(llvm::pred_begin(bb), | |
+ llvm::pred_end(bb)); | |
+ std::set<llvm::BasicBlock*> rights; | |
+ std::map<llvm::BasicBlock*, unsigned> wrongs; | |
+ | |
+ // for each phi input | |
+ for (unsigned i = 0; i < phi->getNumIncomingValues(); ++i) { | |
+ llvm::BasicBlock* incoming = phi->getIncomingBlock(i); | |
+ if (preds.count(incoming)) | |
+ preds.erase(incoming); | |
+ else | |
+ wrongs[incoming] = i; | |
+ } | |
+ | |
+ while (wrongs.size() > rights.size()) { | |
+ // FIXME(llvm): | |
+ // 1) fail instead of looping forever when no progress is made; | |
+ // 2) handle the case where no block has dominated == 1. | |
+ if (debug) | |
+ std::cerr << "SIZE BEFORE " << wrongs.size() - rights.size() << "\n"; | |
+ | |
+ for (auto wrong_pair : wrongs) { | |
+ if (rights.count(wrong_pair.first)) continue; | |
+ auto wrong_node = dom_tree.getNode(wrong_pair.first); | |
+ llvm::BasicBlock* unique_dominated = nullptr; | |
+ int dominated = 0; | |
+ bool no_choice = (preds.size() == 1); | |
+ if (no_choice) { | |
+ unique_dominated = *preds.begin(); // here it might not be dominated | |
+ } else { | |
+ for (auto b : preds) { | |
+ if (dom_tree.dominates(wrong_node, dom_tree.getNode(b))) { | |
+ dominated++; | |
+ unique_dominated = b; | |
+ } | |
+ if (dominated > 1) break; | |
+ } | |
+ } | |
+ if (dominated == 1 || no_choice) { | |
+ phi->setIncomingBlock(wrong_pair.second, unique_dominated); | |
+ rights.insert(wrong_pair.first); // effectively remove from wrongs | |
+ preds.erase(unique_dominated); // remove from preds | |
+ changed = true; | |
+ } | |
+ } | |
+ if (debug) | |
+ std::cerr << "SIZE AFTER " << wrongs.size() - rights.size() << "\n"; | |
+ } // while there are wrong blocks left | |
+ } // for all phi nodes in the block | |
+ } // for each BB in the function | |
+ return changed; | |
+} | |
+ | |
+void NormalizePhisPass::getAnalysisUsage( | |
+ llvm::AnalysisUsage& analysis_usage) const { | |
+ analysis_usage.addRequired<llvm::DominatorTreeWrapperPass>(); | |
+ analysis_usage.setPreservesAll(); | |
+} | |
+ | |
+llvm::FunctionPass* createNormalizePhisPass() { | |
+ llvm::initializeDominatorTreeWrapperPassPass( | |
+ *llvm::PassRegistry::getPassRegistry()); | |
+ return new NormalizePhisPass(); | |
+} | |
+ | |
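+// Editor's sketch: running the pass stand-alone through the legacy pass | |
+// manager (LegacyPassManager.h is pulled in by llvm-headers.h). `module` | |
+// and `function` are assumed to come from LLVMChunkBuilder. | |
+static bool RunNormalizePhis(llvm::Module* module, llvm::Function* function) { | |
+  llvm::legacy::FunctionPassManager fpm(module); | |
+  fpm.add(createNormalizePhisPass());  // Also schedules the DomTree dep. | |
+  fpm.doInitialization(); | |
+  bool changed = fpm.run(*function);  // True iff any phi input was rewritten. | |
+  fpm.doFinalization(); | |
+  return changed; | |
+} | |
+ | |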
+} } // namespace v8::internal | |
diff --git a/src/llvm/pass-normalize-phis.h b/src/llvm/pass-normalize-phis.h | |
new file mode 100644 | |
index 0000000..f80e809 | |
--- /dev/null | |
+++ b/src/llvm/pass-normalize-phis.h | |
@@ -0,0 +1,17 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#ifndef V8_LLVM_PASSES_H_ | |
+#define V8_LLVM_PASSES_H_ | |
+ | |
+#include "llvm-headers.h" | |
+ | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+llvm::FunctionPass* createNormalizePhisPass(); | |
+ | |
+} } // namespace v8::internal | |
+#endif // V8_LLVM_PASSES_H_ | |
diff --git a/src/llvm/pass-rewrite-safepoints.cc b/src/llvm/pass-rewrite-safepoints.cc | |
new file mode 100644 | |
index 0000000..0629337 | |
--- /dev/null | |
+++ b/src/llvm/pass-rewrite-safepoints.cc | |
@@ -0,0 +1,1694 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+//===----------------------------------------------------------------------===// | |
+// | |
+// Rewrite an existing set of gc.statepoints such that they make potential | |
+// relocations performed by the garbage collector explicit in the IR. | |
+// | |
+//===----------------------------------------------------------------------===// | |
+ | |
+// TODO(llvm): edit the LICENSE file so that it is clear this file is derived | |
+// from the LLVM source. | |
+ | |
+#include "llvm-chunk.h" // TODO(llvm): we only use IntHelper from here (move it) | |
+#include "pass-rewrite-safepoints.h" | |
+ | |
+#include "src/base/macros.h" | |
+#include <map> | |
+ | |
+ | |
+using v8::internal::IntHelper; | |
+using v8::internal::ValueSet; | |
+ | |
+using namespace llvm; | |
+ | |
+static bool ClobberNonLive = false; | |
+ | |
+#ifdef DEBUG | |
+// Print the liveset found at the insert location | |
+static bool PrintLiveSet = true; | |
+static bool PrintLiveSetSize = true; | |
+#else | |
+static bool PrintLiveSet = false; | |
+static bool PrintLiveSetSize = false; | |
+#endif | |
+ | |
+ | |
+ | |
+ | |
+namespace { | |
+ | |
+struct RewriteStatepointsForGC : public ModulePass { | |
+ static char ID; // Pass identification, replacement for typeid | |
+ | |
+ RewriteStatepointsForGC(ValueSet& pointers) | |
+ : ModulePass(ID), | |
+ gc_collected_pointers_(pointers) { | |
+ initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry()); | |
+ initializeTargetTransformInfoWrapperPassPass(*PassRegistry::getPassRegistry()); | |
+ } | |
+ bool runOnFunction(Function &F); | |
+ bool runOnModule(Module &M) override { | |
+ bool Changed = false; | |
+ for (Function &F : M) | |
+ Changed |= runOnFunction(F); | |
+ | |
+ if (Changed) { | |
+ // stripDereferenceabilityInfo asserts that shouldRewriteStatepointsIn | |
+ // returns true for at least one function in the module. Since at least | |
+ // one function changed, we know that the precondition is satisfied. | |
+ stripDereferenceabilityInfo(M); | |
+ } | |
+ | |
+ return Changed; | |
+ } | |
+ | |
+ void getAnalysisUsage(AnalysisUsage &AU) const override { | |
+ // We add and rewrite a bunch of instructions, but don't really do much | |
+ // else. We could in theory preserve a lot more analyses here. | |
+ AU.addRequired<DominatorTreeWrapperPass>(); | |
+ AU.addRequired<TargetTransformInfoWrapperPass>(); | |
+ } | |
+ | |
+ /// The IR fed into RewriteStatepointsForGC may have had attributes implying | |
+ /// dereferenceability that are no longer valid/correct after | |
+ /// RewriteStatepointsForGC has run. This is because semantically, after | |
+ /// RewriteStatepointsForGC runs, all calls to gc.statepoint "free" the entire | |
+ /// heap. stripDereferenceabilityInfo (conservatively) restores correctness | |
+ /// by erasing all attributes in the module that externally imply | |
+ /// dereferenceability. | |
+ /// | |
+ void stripDereferenceabilityInfo(Module &M); | |
+ | |
+ // Helpers for stripDereferenceabilityInfo | |
+ void stripDereferenceabilityInfoFromBody(Function &F); | |
+ void stripDereferenceabilityInfoFromPrototype(Function &F); | |
+ | |
+ private: | |
+ ValueSet& gc_collected_pointers_; | |
+}; | |
+} // namespace | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+llvm::ModulePass* createRewriteStatepointsForGCPass(ValueSet& pointers) { | |
+ return new RewriteStatepointsForGC(pointers); | |
+} | |
+ | |
+} } // v8::internal | |
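+ | |
+// Editor's note (sketch): this is a ModulePass, so it is driven through the | |
+// legacy pass manager, roughly: | |
+//   llvm::legacy::PassManager pm; | |
+//   pm.add(v8::internal::createRewriteStatepointsForGCPass(pointers)); | |
+//   pm.run(*module);  // `pointers` and `module` come from the chunk builder | |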
+ | |
+char RewriteStatepointsForGC::ID = 0; | |
+ | |
+namespace { | |
+struct GCPtrLivenessData { | |
+ /// Values defined in this block. | |
+ DenseMap<BasicBlock *, DenseSet<Value *>> KillSet; | |
+ /// Values used in this block (and thus live); does not include values | |
+ /// killed within this block. | |
+ DenseMap<BasicBlock *, DenseSet<Value *>> LiveSet; | |
+ | |
+ /// Values live into this basic block (i.e. used by any | |
+ /// instruction in this basic block or ones reachable from here) | |
+ DenseMap<BasicBlock *, DenseSet<Value *>> LiveIn; | |
+ | |
+ /// Values live out of this basic block (i.e. live into | |
+ /// any successor block) | |
+ DenseMap<BasicBlock *, DenseSet<Value *>> LiveOut; | |
+}; | |
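+ | |
+// Editor's note: computeLiveInValues below runs the standard backward | |
+// dataflow over these sets: | |
+//   LiveIn(BB)  = LiveSet(BB) U (LiveOut(BB) - KillSet(BB)) | |
+//   LiveOut(BB) = union of LiveIn(S) over all successors S of BB | |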
+ | |
+// The type of the internal cache used inside the findBasePointers family | |
+// of functions. From the caller's perspective, this is an opaque type and | |
+// should not be inspected. | |
+// | |
+// In the actual implementation this caches two relations: | |
+// - The base relation itself (i.e. this pointer is based on that one) | |
+// - The base defining value relation (i.e. before base_phi insertion) | |
+// Generally, after the execution of a full findBasePointer call, only the | |
+// base relation will remain. Internally, we add a mixture of the two | |
+// kinds, then rewrite all entries of the second kind to the first. | |
+typedef DenseMap<Value *, Value *> DefiningValueMapTy; | |
+typedef DenseSet<llvm::Value *> StatepointLiveSetTy; | |
+typedef DenseMap<Instruction *, Value *> RematerializedValueMapTy; | |
+ | |
+struct PartiallyConstructedSafepointRecord { | |
+ /// The set of values known to be live across this safepoint | |
+ StatepointLiveSetTy liveset; | |
+ | |
+ /// The *new* gc.statepoint instruction itself. This produces the token | |
+ /// that normal path gc.relocates and the gc.result are tied to. | |
+ Instruction *StatepointToken; | |
+ | |
+ /// Instruction to which exceptional gc relocates are attached | |
+ /// Makes it easier to iterate through them during relocationViaAlloca. | |
+ Instruction *UnwindToken; | |
+ | |
+ /// Records live values we rematerialize instead of relocating. | |
+ /// They are not included in the 'liveset' field. | |
+ /// Maps each rematerialized copy to its original value. | |
+ RematerializedValueMapTy RematerializedValues; | |
+}; | |
+} | |
+ | |
+/// Compute the live-in set for every basic block in the function | |
+static void computeLiveInValues(DominatorTree &DT, Function &F, | |
+ GCPtrLivenessData &Data, | |
+ ValueSet& gc_collected_pointers); | |
+ | |
+/// Given results from the dataflow liveness computation, find the set of live | |
+/// Values at a particular instruction. | |
+static void findLiveSetAtInst(Instruction *inst, GCPtrLivenessData &Data, | |
+ StatepointLiveSetTy &out, | |
+ ValueSet& gc_collected_pointers); | |
+ | |
+// TODO: Once we can get to the GCStrategy, this becomes | |
+// Optional<bool> isGCManagedPointer(const Value *V) const override { | |
+ | |
+static bool isGCPointerType(Type *T) { | |
+ if (auto *PT = dyn_cast<PointerType>(T)) | |
+ // For the sake of this example GC, we arbitrarily pick addrspace(1) as our | |
+ // GC managed heap. We know that a pointer into this heap needs to be | |
+ // updated and that no other pointer does. | |
+ return (1 == PT->getAddressSpace()); | |
+ return false; | |
+} | |
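+// Editor's note: in LLV8 a tagged V8 value is modelled as a pointer in | |
+// addrspace(1), e.g. i8 addrspace(1)*; raw untagged data stays in | |
+// addrspace(0) and is never relocated. | |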
+ | |
+// Return true if this type is one which a) is a gc pointer or contains a GC | |
+// pointer and b) is of a type this code expects to encounter as a live value. | |
+// (The insertion code will DCHECK that a type which matches (a) and not (b) | |
+// is not encountered.) | |
+static bool isHandledGCPointerType(Type *T) { | |
+ // We fully support gc pointers | |
+ if (isGCPointerType(T)) | |
+ return true; | |
+ // We partially support vectors of gc pointers. The code will DCHECK if it | |
+ // can't handle something. | |
+ if (auto VT = dyn_cast<VectorType>(T)) | |
+ if (isGCPointerType(VT->getElementType())) | |
+ return true; | |
+ return false; | |
+} | |
+ | |
+static bool order_by_name(llvm::Value *a, llvm::Value *b) { | |
+ if (a->hasName() && b->hasName()) { | |
+ return -1 == a->getName().compare(b->getName()); | |
+ } else if (a->hasName() && !b->hasName()) { | |
+ return true; | |
+ } else if (!a->hasName() && b->hasName()) { | |
+ return false; | |
+ } else { | |
+ // Better than nothing, but not stable | |
+ return a < b; | |
+ } | |
+} | |
+ | |
+// Return the name of the value suffixed with the provided value, or if the | |
+// value didn't have a name, the default value specified. | |
+static std::string suffixed_name_or(Value *V, StringRef Suffix, | |
+ StringRef DefaultName) { | |
+ return V->hasName() ? (V->getName() + Suffix).str() : DefaultName.str(); | |
+} | |
+ | |
+// Conservatively identifies any definitions which might be live at the | |
+// given instruction. The analysis is performed immediately before the | |
+// given instruction. Values defined by that instruction are not considered | |
+// live. Values used by that instruction are considered live. | |
+static void analyzeParsePointLiveness( | |
+ DominatorTree &DT, GCPtrLivenessData &OriginalLivenessData, | |
+ const CallSite &CS, PartiallyConstructedSafepointRecord &result, | |
+ ValueSet& gc_collected_pointers) { | |
+ Instruction *inst = CS.getInstruction(); | |
+ | |
+ StatepointLiveSetTy liveset; | |
+ findLiveSetAtInst(inst, OriginalLivenessData, liveset, gc_collected_pointers); | |
+ | |
+ if (PrintLiveSet) { | |
+ // Note: This output is used by several of the test cases. | |
+ // The order of elements in a set is not stable, so put them in a vector | |
+ // and sort by name. | |
+ SmallVector<Value *, 64> Temp; | |
+ Temp.insert(Temp.end(), liveset.begin(), liveset.end()); | |
+ std::sort(Temp.begin(), Temp.end(), order_by_name); | |
+ errs() << "Live Variables:\n"; | |
+ for (Value *V : Temp) | |
+ dbgs() << " " << V->getName() << " " << *V << "\n"; | |
+ } | |
+ if (PrintLiveSetSize) { | |
+ errs() << "Safepoint For: " << CS.getCalledValue()->getName() << "\n"; | |
+ errs() << "Number live values: " << liveset.size() << "\n"; | |
+ } | |
+ result.liveset = liveset; | |
+} | |
+ | |
+static bool isKnownBaseResult(Value *V); | |
+namespace { | |
+/// A single base defining value - An immediate base defining value for an | |
+/// instruction 'Def' is an input to 'Def' whose base is also a base of 'Def'. | |
+/// For instructions which have multiple pointer [vector] inputs or that | |
+/// transition between vector and scalar types, there is no immediate base | |
+/// defining value. The 'base defining value' for 'Def' is the transitive | |
+/// closure of this relation stopping at the first instruction which has no | |
+/// immediate base defining value. The b.d.v. might itself be a base pointer, | |
+/// but it can also be an arbitrary derived pointer. | |
+struct BaseDefiningValueResult { | |
+ /// Contains the value which is the base defining value. | |
+ Value * const BDV; | |
+ /// True if the base defining value is also known to be an actual base | |
+ /// pointer. | |
+ const bool IsKnownBase; | |
+ BaseDefiningValueResult(Value *BDV, bool IsKnownBase) | |
+ : BDV(BDV), IsKnownBase(IsKnownBase) { | |
+#ifndef NDEBUG | |
+ // Check consistency between new and old means of checking whether a BDV is | |
+ // a base. | |
+ bool MustBeBase = isKnownBaseResult(BDV); | |
+ USE(MustBeBase); | |
+ DCHECK(!MustBeBase || MustBeBase == IsKnownBase); | |
+#endif | |
+ } | |
+}; | |
+} | |
+ | |
+/// Given the result of a call to findBaseDefiningValue, or findBaseOrBDV, | |
+/// is it known to be a base pointer? Or do we need to continue searching. | |
+static bool isKnownBaseResult(Value *V) { | |
+ if (!isa<PHINode>(V) && !isa<SelectInst>(V) && | |
+ !isa<ExtractElementInst>(V) && !isa<InsertElementInst>(V) && | |
+ !isa<ShuffleVectorInst>(V)) { | |
+ // no recursion possible | |
+ return true; | |
+ } | |
+ if (isa<Instruction>(V) && | |
+ cast<Instruction>(V)->getMetadata("is_base_value")) { | |
+ // This is a previously inserted base phi or select. We know | |
+ // that this is a base value. | |
+ return true; | |
+ } | |
+ | |
+ // We need to keep searching | |
+ return false; | |
+} | |
+ | |
+namespace { | |
+/// Models the state of a single base defining value in the findBasePointer | |
+/// algorithm for determining where a new instruction is needed to propagate | |
+/// the base of this BDV. | |
+class BDVState { | |
+public: | |
+ enum Status { Unknown, Base, Conflict }; | |
+ | |
+ BDVState(Status s, Value *b = nullptr) : status(s), base(b) { | |
+ DCHECK(status != Base || b); | |
+ } | |
+ explicit BDVState(Value *b) : status(Base), base(b) {} | |
+ BDVState() : status(Unknown), base(nullptr) {} | |
+ | |
+ Status getStatus() const { return status; } | |
+ Value *getBase() const { return base; } | |
+ | |
+ bool isBase() const { return getStatus() == Base; } | |
+ bool isUnknown() const { return getStatus() == Unknown; } | |
+ bool isConflict() const { return getStatus() == Conflict; } | |
+ | |
+ bool operator==(const BDVState &other) const { | |
+ return base == other.base && status == other.status; | |
+ } | |
+ | |
+ bool operator!=(const BDVState &other) const { return !(*this == other); } | |
+ | |
+ LLVM_DUMP_METHOD | |
+ void dump() const { print(dbgs()); dbgs() << '\n'; } | |
+ | |
+ void print(raw_ostream &OS) const { | |
+ switch (status) { | |
+ case Unknown: | |
+ OS << "U"; | |
+ break; | |
+ case Base: | |
+ OS << "B"; | |
+ break; | |
+ case Conflict: | |
+ OS << "C"; | |
+ break; | |
+ }; | |
+ OS << " (" << base << " - " | |
+ << (base ? base->getName() : "nullptr") << "): "; | |
+ } | |
+ | |
+private: | |
+ Status status; | |
+ Value *base; // non-null only if status == Base | |
+}; | |
+} | |
+ | |
+#ifdef DEBUG | |
+static raw_ostream &operator<<(raw_ostream &OS, const BDVState &State) { | |
+ State.print(OS); | |
+ return OS; | |
+} | |
+#endif | |
+ | |
+namespace { | |
+// Values of type BDVState form a lattice, and this is a helper | |
+// class that implements the meet operation. The meat of the meet | |
+// operation is implemented in MeetBDVStates::pureMeet | |
+class MeetBDVStates { | |
+public: | |
+ /// Initializes the currentResult to the TOP state so that it can be met with | |
+ /// any other state to produce that state. | |
+ MeetBDVStates() {} | |
+ | |
+ // Destructively meet the current result with the given BDVState | |
+ void meetWith(BDVState otherState) { | |
+ currentResult = meet(otherState, currentResult); | |
+ } | |
+ | |
+ BDVState getResult() const { return currentResult; } | |
+ | |
+private: | |
+ BDVState currentResult; | |
+ | |
+ /// Perform a meet operation on two elements of the BDVState lattice. | |
+ static BDVState meet(BDVState LHS, BDVState RHS) { | |
+ DCHECK((pureMeet(LHS, RHS) == pureMeet(RHS, LHS)) && | |
+ "math is wrong: meet does not commute!"); | |
+ BDVState Result = pureMeet(LHS, RHS); | |
+#ifdef DEBUG | |
+ dbgs() << "meet of " << LHS << " with " << RHS << " produced " | |
+ << Result << "\n"; | |
+#endif | |
+ return Result; | |
+ } | |
+ | |
+ static BDVState pureMeet(const BDVState &stateA, const BDVState &stateB) { | |
+ switch (stateA.getStatus()) { | |
+ case BDVState::Unknown: | |
+ return stateB; | |
+ | |
+ case BDVState::Base: | |
+ DCHECK(stateA.getBase() && "can't be null"); | |
+ if (stateB.isUnknown()) | |
+ return stateA; | |
+ | |
+ if (stateB.isBase()) { | |
+ if (stateA.getBase() == stateB.getBase()) { | |
+ DCHECK(stateA == stateB && "equality broken!"); | |
+ return stateA; | |
+ } | |
+ return BDVState(BDVState::Conflict); | |
+ } | |
+ DCHECK(stateB.isConflict() && "only three states!"); | |
+ return BDVState(BDVState::Conflict); | |
+ | |
+ case BDVState::Conflict: | |
+ return stateA; | |
+ } | |
+ llvm_unreachable("only three states!"); | |
+ } | |
+}; | |
+} | |
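+ | |
+// Editor's note: the resulting meet table, writing U = Unknown, B(x) = Base | |
+// with pointer x, and C = Conflict: | |
+//   meet(U, s)       == s      (U is the top element) | |
+//   meet(B(x), B(x)) == B(x) | |
+//   meet(B(x), B(y)) == C      (distinct bases conflict) | |
+//   meet(C, s)       == C      (C is the bottom element) | |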
+ | |
+/// Given an updated version of the dataflow liveness results, update the | |
+/// liveset and base pointer maps for the call site CS. | |
+static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData, | |
+ const CallSite &CS, | |
+ PartiallyConstructedSafepointRecord &result, | |
+ ValueSet& gc_collected_pointers); | |
+ | |
+static void recomputeLiveInValues( | |
+ Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate, | |
+ MutableArrayRef<struct PartiallyConstructedSafepointRecord> records, | |
+ ValueSet& gc_collected_pointers) { | |
+ // TODO-PERF: reuse the original liveness, then simply run the dataflow | |
+ // again. The old values are still live and will help it stabilize quickly. | |
+ GCPtrLivenessData RevisedLivenessData; | |
+ computeLiveInValues(DT, F, RevisedLivenessData, gc_collected_pointers); | |
+ for (size_t i = 0; i < records.size(); i++) { | |
+ struct PartiallyConstructedSafepointRecord &info = records[i]; | |
+ const CallSite &CS = toUpdate[i]; | |
+ recomputeLiveInValues(RevisedLivenessData, CS, info, gc_collected_pointers); | |
+ } | |
+} | |
+ | |
+// When inserting gc.relocate calls, we need to ensure there are no uses | |
+// of the original value between the gc.statepoint and the gc.relocate call. | |
+// One case which can arise is a phi node starting one of the successor blocks. | |
+// We also need to be able to insert the gc.relocates only on the path which | |
+// goes through the statepoint. We might need to split an edge to make this | |
+// possible. | |
+static BasicBlock * | |
+normalizeForInvokeSafepoint(BasicBlock *BB, BasicBlock *InvokeParent, | |
+ DominatorTree &DT) { | |
+ BasicBlock *Ret = BB; | |
+ if (!BB->getUniquePredecessor()) { | |
+ Ret = SplitBlockPredecessors(BB, InvokeParent, "", &DT); | |
+ } | |
+ | |
+ // Now that 'ret' has unique predecessor we can safely remove all phi nodes | |
+ // from it | |
+ FoldSingleEntryPHINodes(Ret); | |
+ DCHECK(!isa<PHINode>(Ret->begin())); | |
+ | |
+ // At this point, we can safely insert a gc.relocate as the first instruction | |
+ // in Ret if needed. | |
+ return Ret; | |
+} | |
+ | |
+static size_t find_index(ArrayRef<Value *> livevec, Value *val) { | |
+ auto itr = std::find(livevec.begin(), livevec.end(), val); | |
+ DCHECK(livevec.end() != itr); | |
+ size_t index = std::distance(livevec.begin(), itr); | |
+ DCHECK(index < livevec.size()); | |
+ return index; | |
+} | |
+ | |
+// Create new attribute set containing only attributes which can be transferred | |
+// from original call to the safepoint. | |
+static AttributeSet legalizeCallAttributes(AttributeSet AS) { | |
+ AttributeSet ret; | |
+ | |
+ for (unsigned Slot = 0; Slot < AS.getNumSlots(); Slot++) { | |
+ unsigned index = AS.getSlotIndex(Slot); | |
+ | |
+ if (index == AttributeSet::ReturnIndex || | |
+ index == AttributeSet::FunctionIndex) { | |
+ | |
+ for (auto it = AS.begin(Slot), it_end = AS.end(Slot); it != it_end; | |
+ ++it) { | |
+ Attribute attr = *it; | |
+ | |
+ // Do not allow certain attributes - just skip them. | 
+ // A safepoint cannot be readonly or readnone. | 
+ if (attr.hasAttribute(Attribute::ReadNone) || | |
+ attr.hasAttribute(Attribute::ReadOnly)) | |
+ continue; | |
+ | |
+ ret = ret.addAttributes( | |
+ AS.getContext(), index, | |
+ AttributeSet::get(AS.getContext(), index, AttrBuilder(attr))); | |
+ } | |
+ } | |
+ | |
+ // Just skip parameter attributes for now | |
+ } | |
+ | |
+ return ret; | |
+} | |
+ | |
+/// Helper function to place all gc relocates necessary for the given | |
+/// statepoint. | |
+/// Inputs: | 
+/// LiveVariables - list of variables to be relocated. | 
+/// LiveStart - index of the first live variable in the statepoint's | 
+/// argument list. | 
+/// StatepointToken - statepoint instruction to which relocates should be | 
+/// bound. | 
+/// Builder - LLVM IR builder to be used to construct the new calls. | 
+static void CreateGCRelocates(ArrayRef<llvm::Value *> LiveVariables, | |
+ const int LiveStart, | |
+ Instruction *StatepointToken, | |
+ IRBuilder<> Builder) { | |
+ if (LiveVariables.empty()) | |
+ return; | |
+ | |
+ Module *M = StatepointToken->getModule(); | |
+ auto AS = cast<PointerType>(LiveVariables[0]->getType())->getAddressSpace(); | |
+ // FIXME(llvm): write "Tagged" | |
+ Type *Types[] = {Type::getInt8PtrTy(M->getContext(), AS)}; | |
+ Value *GCRelocateDecl = | |
+ Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types); | |
+ | |
+ for (unsigned i = 0; i < LiveVariables.size(); i++) { | |
+ // Generate the gc.relocate call and save the result | |
+ auto index = IntHelper::AsUInt32( | |
+ LiveStart + find_index(LiveVariables, LiveVariables[i])); | |
+ Value *LiveIdx = Builder.getInt32(index); | |
+ Value *BaseIdx = LiveIdx; | |
+ | |
+ // only specify a debug name if we can give a useful one | |
+ CallInst *Reloc = Builder.CreateCall( | |
+ GCRelocateDecl, {StatepointToken, BaseIdx, LiveIdx}, | |
+ suffixed_name_or(LiveVariables[i], ".relocated", "")); | |
+ // Trick CodeGen into thinking there are lots of free registers at this | |
+ // fake call. | |
+ Reloc->setCallingConv(CallingConv::Cold); | |
+ } | |
+} | |
+ | |
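+// For orientation, each call emitted above looks roughly like this in the | 
+// IR (names and exact intrinsic mangling are illustrative and depend on | 
+// the LLVM revision): | 
+//   %p.relocated = call coldcc i8 addrspace(1)* | 
+//       @llvm.experimental.gc.relocate(%statepoint_token, i32 7, i32 7) | 
+// Both indices address the same slot of the live vector, since BaseIdx is | 
+// simply a copy of LiveIdx here (every pointer acts as its own base). | 
+ | 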
+static void | |
+makeStatepointExplicitImpl(const CallSite &CS, /* to replace */ | |
+ const SmallVectorImpl<llvm::Value *> &liveVariables, | |
+ Pass *P, | |
+ PartiallyConstructedSafepointRecord &result) { | |
+ DCHECK(isStatepoint(CS) && | |
+ "This method expects to be rewriting a statepoint"); | |
+ | |
+ BasicBlock *BB = CS.getInstruction()->getParent(); | |
+ DCHECK(BB); | |
+ Function *F = BB->getParent(); | |
+ DCHECK(F && "must be set"); | |
+ Module *M = F->getParent(); | |
+ (void)M; | |
+ DCHECK(M && "must be set"); | |
+ | |
+ // We're not changing the function signature of the statepoint since the gc | |
+ // arguments go into the var args section. | |
+ Function *gc_statepoint_decl = CS.getCalledFunction(); | |
+ | |
+ // Then go ahead and use the builder to actually do the inserts. We insert | 
+ // immediately before the instruction being replaced, under the assumption | 
+ // that all its arguments are available there. We can't insert afterwards, | 
+ // since we may be replacing a terminator. | 
+ Instruction *insertBefore = CS.getInstruction(); | |
+ IRBuilder<> Builder(insertBefore); | |
+ // Copy all of the arguments from the original statepoint - this includes the | |
+ // target, call args, and deopt args | |
+ SmallVector<llvm::Value *, 64> args; | |
+ args.insert(args.end(), CS.arg_begin(), CS.arg_end()); | |
+ // TODO: Clear the 'needs rewrite' flag | |
+ | |
+ // add all the pointers to be relocated (gc arguments) | |
+ // Capture the start of the live variable list for use in the gc_relocates | |
+ const int live_start = IntHelper::AsInt(args.size()); | |
+ args.insert(args.end(), liveVariables.begin(), liveVariables.end()); | |
+ | |
+ // Create the statepoint given all the arguments | |
+ Instruction *token = nullptr; | |
+ AttributeSet return_attributes; | |
+ if (CS.isCall()) { | |
+ CallInst *toReplace = cast<CallInst>(CS.getInstruction()); | |
+ CallInst *call = | |
+ Builder.CreateCall(gc_statepoint_decl, args, "safepoint_token"); | |
+ call->setTailCall(toReplace->isTailCall()); | |
+ call->setCallingConv(toReplace->getCallingConv()); | |
+ | |
+ // Currently we will fail on parameter attributes and on certain | |
+ // function attributes. | |
+ AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes()); | |
+ // If we can handle this set of attributes, set the function attrs directly | 
+ // on the statepoint and keep the return attrs for the gc_result intrinsic. | 
+ call->setAttributes(new_attrs.getFnAttributes()); | |
+ return_attributes = new_attrs.getRetAttributes(); | |
+ | |
+ token = call; | |
+ | |
+ // Put the following gc_result and gc_relocate calls immediately after | 
+ // the old call (which we're about to delete) | 
+ BasicBlock::iterator next(toReplace); | |
+ DCHECK(BB->end() != next && "not a terminator, must have next"); | |
+ next++; | |
+ Instruction *IP = &*(next); | |
+ Builder.SetInsertPoint(IP); | |
+ Builder.SetCurrentDebugLocation(IP->getDebugLoc()); | |
+ | |
+ } else { | |
+ InvokeInst *toReplace = cast<InvokeInst>(CS.getInstruction()); | |
+ | |
+ // Insert the new invoke into the old block. We'll remove the old one in a | |
+ // moment at which point this will become the new terminator for the | |
+ // original block. | |
+ InvokeInst *invoke = InvokeInst::Create( | 
+ gc_statepoint_decl, toReplace->getNormalDest(), | 
+ toReplace->getUnwindDest(), args, "statepoint_token", | 
+ toReplace->getParent()); | 
+ invoke->setCallingConv(toReplace->getCallingConv()); | |
+ | |
+ // Currently we will fail on parameter attributes and on certain | |
+ // function attributes. | |
+ AttributeSet new_attrs = legalizeCallAttributes(toReplace->getAttributes()); | |
+ // If we can handle this set of attributes, set the function attrs directly | 
+ // on the statepoint and keep the return attrs for the gc_result intrinsic. | 
+ invoke->setAttributes(new_attrs.getFnAttributes()); | |
+ return_attributes = new_attrs.getRetAttributes(); | |
+ | |
+ token = invoke; | |
+ | |
+ // Generate gc relocates in exceptional path | |
+ BasicBlock *unwindBlock = toReplace->getUnwindDest(); | |
+ DCHECK(!isa<PHINode>(unwindBlock->begin()) && | |
+ unwindBlock->getUniquePredecessor() && | |
+ "can't safely insert in this block!"); | |
+ | |
+ Instruction *IP = &*(unwindBlock->getFirstInsertionPt()); | |
+ Builder.SetInsertPoint(IP); | |
+ Builder.SetCurrentDebugLocation(toReplace->getDebugLoc()); | |
+ | |
+ // Extract second element from landingpad return value. We will attach | |
+ // exceptional gc relocates to it. | |
+ const unsigned idx = 1; | |
+ Instruction *exceptional_token = | |
+ cast<Instruction>(Builder.CreateExtractValue( | |
+ unwindBlock->getLandingPadInst(), idx, "relocate_token")); | |
+ result.UnwindToken = exceptional_token; | |
+ | |
+ CreateGCRelocates(liveVariables, live_start, exceptional_token, Builder); | |
+ | |
+ // Generate gc relocates and returns for normal block | |
+ BasicBlock *normalDest = toReplace->getNormalDest(); | |
+ DCHECK(!isa<PHINode>(normalDest->begin()) && | |
+ normalDest->getUniquePredecessor() && | |
+ "can't safely insert in this block!"); | |
+ | |
+ IP = &*(normalDest->getFirstInsertionPt()); | |
+ Builder.SetInsertPoint(IP); | |
+ | |
+ // gc relocates will be generated later as if it were a regular call | 
+ // statepoint | 
+ } | |
+ DCHECK(token); | |
+ | |
+ // Take the name of the original value call if it had one. | |
+ token->takeName(CS.getInstruction()); | |
+ | |
+// The GCResult is already inserted, we just need to find it | |
+#ifndef NDEBUG | |
+ Instruction *toReplace = CS.getInstruction(); | |
+ USE(toReplace); | |
+ DCHECK((toReplace->hasNUses(0) || toReplace->hasNUses(1)) && | |
+ "only valid use before rewrite is gc.result"); | |
+ DCHECK(!toReplace->hasOneUse() || | |
+ isGCResult(cast<Instruction>(*toReplace->user_begin()))); | |
+#endif | |
+ | |
+ // Update the gc.result of the original statepoint (if any) to use the newly | |
+ // inserted statepoint. This is safe to do here since the token can't be | |
+ // considered a live reference. | |
+ CS.getInstruction()->replaceAllUsesWith(token); | |
+ | |
+ result.StatepointToken = token; | |
+ | |
+ // Second, create a gc.relocate for every live variable | |
+ CreateGCRelocates(liveVariables, live_start, token, Builder); | |
+} | |
+ | |
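+// Net effect of makeStatepointExplicitImpl, sketched with operand lists | 
+// elided and names illustrative: an existing deopt-only statepoint | 
+//   %t = call ... @llvm.experimental.gc.statepoint(@f, ..., deopt args) | 
+// is replaced by one whose variadic tail also carries the live pointers, | 
+//   %t2 = call ... @llvm.experimental.gc.statepoint(@f, ..., deopt args, | 
+//                                                   i8 addrspace(1)* %p) | 
+// followed by one gc.relocate per live pointer. | 
+ | 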
+namespace { | |
+struct name_ordering { | |
+ Value *base; | |
+ Value *derived; | |
+ bool operator()(name_ordering const &a, name_ordering const &b) { | |
+ return -1 == a.derived->getName().compare(b.derived->getName()); | |
+ } | |
+}; | |
+} | |
+ | |
+// Replace an existing gc.statepoint with a new one and a set of gc.relocates | |
+// which make the relocations happening at this safepoint explicit. | |
+// | |
+// WARNING: Does not do any fixup to adjust users of the original live | |
+// values. That's the caller's responsibility. | 
+static void | |
+makeStatepointExplicit(DominatorTree &DT, const CallSite &CS, Pass *P, | |
+ PartiallyConstructedSafepointRecord &result) { | |
+ auto liveset = result.liveset; | |
+ | |
+ // Convert to vector for efficient cross referencing. | |
+ SmallVector<Value *, 64> livevec; | |
+ livevec.reserve(liveset.size()); | |
+ for (Value *L : liveset) { | |
+ livevec.push_back(L); | |
+ } | |
+ | |
+ // Do the actual rewriting and delete the old statepoint | |
+ makeStatepointExplicitImpl(CS, livevec, P, result); | |
+ CS.getInstruction()->eraseFromParent(); | |
+} | |
+ | |
+// Helper function for relocationViaAlloca. It receives an iterator range | 
+// over the statepoint's gc relocates and emits a store to the assigned | 
+// location (via AllocaMap) for each of them. Visited values are added to | 
+// the VisitedLiveValues set, which we later use for a sanity check. | 
+static void | |
+insertRelocationStores(iterator_range<Value::user_iterator> GCRelocs, | |
+ DenseMap<Value *, Value *> &AllocaMap, | |
+ DenseSet<Value *> &VisitedLiveValues) { | |
+ | |
+ for (User *U : GCRelocs) { | |
+ if (!isa<IntrinsicInst>(U)) | |
+ continue; | |
+ | |
+ IntrinsicInst *RelocatedValue = cast<IntrinsicInst>(U); | |
+ | |
+ // We only care about relocates | |
+ if (RelocatedValue->getIntrinsicID() != | |
+ Intrinsic::experimental_gc_relocate) { | |
+ continue; | |
+ } | |
+ | |
+ GCRelocateOperands RelocateOperands(RelocatedValue); | |
+ Value *OriginalValue = | |
+ const_cast<Value *>(RelocateOperands.getDerivedPtr()); | |
+ DCHECK(AllocaMap.count(OriginalValue)); | |
+ Value *Alloca = AllocaMap[OriginalValue]; | |
+ | |
+ // Emit store into the related alloca | |
+ // All gc_relocate are i8 addrspace(1)* typed, and it must be bitcasted to | |
+ // the correct type according to alloca. | |
+ DCHECK(RelocatedValue->getNextNode() && | 
+ "Should always have one since it's not a terminator"); | 
+ IRBuilder<> Builder(RelocatedValue->getNextNode()); | |
+ Value *CastedRelocatedValue = | |
+ Builder.CreateBitCast(RelocatedValue, | |
+ cast<AllocaInst>(Alloca)->getAllocatedType(), | |
+ suffixed_name_or(RelocatedValue, ".casted", "")); | |
+ | |
+ StoreInst *Store = new StoreInst(CastedRelocatedValue, Alloca); | |
+ Store->insertAfter(cast<Instruction>(CastedRelocatedValue)); | |
+ | |
+#ifndef NDEBUG | |
+ VisitedLiveValues.insert(OriginalValue); | |
+#endif | |
+ } | |
+} | |
+ | |
+// Helper function for the "relocationViaAlloca". Similar to the | |
+// "insertRelocationStores" but works for rematerialized values. | |
+static void | |
+insertRematerializationStores( | |
+ RematerializedValueMapTy RematerializedValues, | |
+ DenseMap<Value *, Value *> &AllocaMap, | |
+ DenseSet<Value *> &VisitedLiveValues) { | |
+ | |
+ for (auto RematerializedValuePair: RematerializedValues) { | |
+ Instruction *RematerializedValue = RematerializedValuePair.first; | |
+ Value *OriginalValue = RematerializedValuePair.second; | |
+ | |
+ DCHECK(AllocaMap.count(OriginalValue) && | |
+ "Can not find alloca for rematerialized value"); | |
+ Value *Alloca = AllocaMap[OriginalValue]; | |
+ | |
+ StoreInst *Store = new StoreInst(RematerializedValue, Alloca); | |
+ Store->insertAfter(RematerializedValue); | |
+ | |
+#ifndef NDEBUG | |
+ VisitedLiveValues.insert(OriginalValue); | |
+#endif | |
+ } | |
+} | |
+ | |
+/// Do all the relocation updates via allocas and mem2reg | 
+static void relocationViaAlloca( | |
+ Function &F, DominatorTree &DT, ArrayRef<Value *> Live, | |
+ ArrayRef<struct PartiallyConstructedSafepointRecord> Records) { | |
+#ifndef NDEBUG | |
+ // record initial number of (static) allocas; we'll check we have the same | |
+ // number when we get done. | |
+ int InitialAllocaNum = 0; | |
+ for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E; | |
+ I++) | |
+ if (isa<AllocaInst>(*I)) | |
+ InitialAllocaNum++; | |
+#endif | |
+ | |
+ // TODO-PERF: change data structures, reserve | |
+ DenseMap<Value *, Value *> AllocaMap; | |
+ SmallVector<AllocaInst *, 200> PromotableAllocas; | |
+ // Used later to check that we have enough allocas to store all values | 
+ std::size_t NumRematerializedValues = 0; | |
+ PromotableAllocas.reserve(Live.size()); | |
+ | |
+ // Emit alloca for "LiveValue" and record it in "allocaMap" and | |
+ // "PromotableAllocas" | |
+ auto emitAllocaFor = [&](Value *LiveValue) { | |
+ AllocaInst *Alloca = new AllocaInst(LiveValue->getType(), "", | |
+ F.getEntryBlock().getFirstNonPHI()); | |
+ AllocaMap[LiveValue] = Alloca; | |
+ PromotableAllocas.push_back(Alloca); | |
+ }; | |
+ | |
+ // emit alloca for each live gc pointer | |
+ for (unsigned i = 0; i < Live.size(); i++) { | |
+ emitAllocaFor(Live[i]); | |
+ } | |
+ | |
+ // emit allocas for rematerialized values | |
+ for (size_t i = 0; i < Records.size(); i++) { | |
+ const struct PartiallyConstructedSafepointRecord &Info = Records[i]; | |
+ | |
+ for (auto RematerializedValuePair : Info.RematerializedValues) { | |
+ Value *OriginalValue = RematerializedValuePair.second; | |
+ if (AllocaMap.count(OriginalValue) != 0) | |
+ continue; | |
+ | |
+ emitAllocaFor(OriginalValue); | |
+ ++NumRematerializedValues; | |
+ } | |
+ } | |
+ | |
+ // The next two loops are part of the same conceptual operation. We need to | |
+ // insert a store to the alloca after the original def and at each | |
+ // redefinition. We need to insert a load before each use. These are split | |
+ // into distinct loops for performance reasons. | |
+ | |
+ // Update each gc pointer after each statepoint: either store a relocated | 
+ // value or null (if no relocated value was found for this gc pointer and | 
+ // it is not a gc_result). This must happen before we update the statepoint | 
+ // with loads of the allocas, otherwise we lose the link between the | 
+ // statepoint and the old def. | 
+ for (size_t i = 0; i < Records.size(); i++) { | |
+ const struct PartiallyConstructedSafepointRecord &Info = Records[i]; | |
+ Value *Statepoint = Info.StatepointToken; | |
+ | |
+ // This will be used for consistency check | |
+ DenseSet<Value *> VisitedLiveValues; | |
+ | |
+ // Insert stores for normal statepoint gc relocates | |
+ insertRelocationStores(Statepoint->users(), AllocaMap, VisitedLiveValues); | |
+ | |
+ // In case if it was invoke statepoint | |
+ // we will insert stores for exceptional path gc relocates. | |
+ if (isa<InvokeInst>(Statepoint)) { | |
+ insertRelocationStores(Info.UnwindToken->users(), AllocaMap, | |
+ VisitedLiveValues); | |
+ } | |
+ | |
+ // Do similar thing with rematerialized values | |
+ insertRematerializationStores(Info.RematerializedValues, AllocaMap, | |
+ VisitedLiveValues); | |
+ | |
+ if (ClobberNonLive) { | |
+ // As a debugging aid, pretend that an unrelocated pointer becomes null at | |
+ // the gc.statepoint. This will turn some subtle GC problems into | |
+ // slightly easier to debug SEGVs. Note that on large IR files with | |
+ // lots of gc.statepoints this is extremely costly in both memory and | 
+ // time. | 
+ SmallVector<AllocaInst *, 64> ToClobber; | |
+ for (auto Pair : AllocaMap) { | |
+ Value *Def = Pair.first; | |
+ AllocaInst *Alloca = cast<AllocaInst>(Pair.second); | |
+ | |
+ // This value was relocated | |
+ if (VisitedLiveValues.count(Def)) { | |
+ continue; | |
+ } | |
+ ToClobber.push_back(Alloca); | |
+ } | |
+ | |
+ auto InsertClobbersAt = [&](Instruction *IP) { | |
+ for (auto *AI : ToClobber) { | |
+ auto AIType = cast<PointerType>(AI->getType()); | |
+ auto PT = cast<PointerType>(AIType->getElementType()); | |
+ Constant *CPN = ConstantPointerNull::get(PT); | |
+ StoreInst *Store = new StoreInst(CPN, AI); | |
+ Store->insertBefore(IP); | |
+ } | |
+ }; | |
+ | |
+ // Insert the clobbering stores. These may get intermixed with the | |
+ // gc.results and gc.relocates, but that's fine. | |
+ if (auto II = dyn_cast<InvokeInst>(Statepoint)) { | |
+ InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt()); | |
+ InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt()); | |
+ } else { | |
+ BasicBlock::iterator Next(cast<CallInst>(Statepoint)); | |
+ Next++; | |
+ InsertClobbersAt(Next); | |
+ } | |
+ } | |
+ } | |
+ // update use with load allocas and add store for gc_relocated | |
+ for (auto Pair : AllocaMap) { | |
+ Value *Def = Pair.first; | |
+ Value *Alloca = Pair.second; | |
+ | |
+ // We pre-record the uses of the def so that we don't have to worry about | 
+ // later updates that change the user information. | 
+ SmallVector<Instruction *, 20> Uses; | |
+ // PERF: trade a linear scan for repeated reallocation | |
+ Uses.reserve(std::distance(Def->user_begin(), Def->user_end())); | |
+ for (User *U : Def->users()) { | |
+ if (!isa<ConstantExpr>(U)) { | |
+ // If the use is a ConstantExpr, then the def is either a | 
+ // ConstantExpr itself or null. In either case (recursively in | 
+ // the first, directly in the second), the oop it is ultimately | 
+ // dependent on is null, and this particular use does not need | 
+ // to be fixed up. | 
+ Uses.push_back(cast<Instruction>(U)); | |
+ } | |
+ } | |
+ | |
+ std::sort(Uses.begin(), Uses.end()); | |
+ auto Last = std::unique(Uses.begin(), Uses.end()); | |
+ Uses.erase(Last, Uses.end()); | |
+ | |
+ for (Instruction *Use : Uses) { | |
+ if (isa<PHINode>(Use)) { | |
+ PHINode *Phi = cast<PHINode>(Use); | |
+ for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) { | |
+ if (Def == Phi->getIncomingValue(i)) { | |
+ LoadInst *Load = new LoadInst( | |
+ Alloca, "", Phi->getIncomingBlock(i)->getTerminator()); | |
+ Phi->setIncomingValue(i, Load); | |
+ } | |
+ } | |
+ } else { | |
+ LoadInst *Load = new LoadInst(Alloca, "", Use); | |
+ Use->replaceUsesOfWith(Def, Load); | |
+ } | |
+ } | |
+ | |
+ // emit store for the initial gc value | |
+ // store must be inserted after load, otherwise store will be in alloca's | |
+ // use list and an extra load will be inserted before it | |
+ StoreInst *Store = new StoreInst(Def, Alloca); | |
+ if (Instruction *Inst = dyn_cast<Instruction>(Def)) { | |
+ if (InvokeInst *Invoke = dyn_cast<InvokeInst>(Inst)) { | |
+ // InvokeInst is a TerminatorInst, so the store needs to be inserted | 
+ // into its normal destination block. | 
+ BasicBlock *NormalDest = Invoke->getNormalDest(); | |
+ Store->insertBefore(NormalDest->getFirstNonPHI()); | |
+ } else { | |
+ DCHECK(!Inst->isTerminator() && | |
+ "The only TerminatorInst that can produce a value is " | |
+ "InvokeInst which is handled above."); | |
+ Store->insertAfter(Inst); | |
+ } | |
+ } else { | |
+ DCHECK(isa<Argument>(Def)); | |
+ Store->insertAfter(cast<Instruction>(Alloca)); | |
+ } | |
+ } | |
+ | |
+ DCHECK(PromotableAllocas.size() == Live.size() + NumRematerializedValues && | |
+ "must have one alloca per live or rematerialized value"); | 
+ if (!PromotableAllocas.empty()) { | |
+ // apply mem2reg to promote alloca to SSA | |
+ PromoteMemToReg(PromotableAllocas, DT); | |
+ } | |
+ | |
+#ifndef NDEBUG | |
+ for (auto I = F.getEntryBlock().begin(), E = F.getEntryBlock().end(); I != E; | |
+ I++) | |
+ if (isa<AllocaInst>(*I)) | |
+ InitialAllocaNum--; | |
+ DCHECK(InitialAllocaNum == 0 && "We must not introduce any extra allocas"); | |
+#endif | |
+} | |
+ | |
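+// A minimal sketch of the strategy above (illustrative, untyped pseudo-IR): | 
+//   %slot = alloca i8 addrspace(1)*      ; one slot per live value | 
+//   store %p, %slot                      ; after the original def | 
+//   store %p.relocated, %slot            ; after each statepoint | 
+//   %p1 = load %slot                     ; before each original use | 
+// PromoteMemToReg then folds the slots back into SSA form, introducing the | 
+// phi nodes that tie the unrelocated and relocated copies together. | 
+ | 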
+/// Implement a unique function which doesn't require we sort the input | |
+/// vector. Doing so has the effect of changing the output of a couple of | |
+/// tests in ways which make them less useful in testing fused safepoints. | |
+template <typename T> static void unique_unsorted(SmallVectorImpl<T> &Vec) { | |
+ SmallSet<T, 8> Seen; | |
+ Vec.erase(std::remove_if(Vec.begin(), Vec.end(), [&](const T &V) { | |
+ return !Seen.insert(V).second; | |
+ }), Vec.end()); | |
+} | |
+ | |
+/// Insert holders so that each Value is obviously live through the entire | |
+/// lifetime of the call. | |
+static void insertUseHolderAfter(CallSite &CS, const ArrayRef<Value *> Values, | |
+ SmallVectorImpl<CallInst *> &Holders) { | |
+ if (Values.empty()) | |
+ // No values to hold live, might as well not insert the empty holder | |
+ return; | |
+ | |
+ Module *M = CS.getInstruction()->getParent()->getParent()->getParent(); | |
+ // Use a dummy vararg function to actually hold the values live | |
+ Function *Func = cast<Function>(M->getOrInsertFunction( | |
+ "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true))); | |
+ if (CS.isCall()) { | |
+ // For call safepoints insert dummy calls right after safepoint | |
+ BasicBlock::iterator Next(CS.getInstruction()); | |
+ Next++; | |
+ Holders.push_back(CallInst::Create(Func, Values, "", Next)); | |
+ return; | |
+ } | |
+ // For invoke safepoints insert dummy calls both in the normal and | 
+ // exceptional destination blocks | |
+ auto *II = cast<InvokeInst>(CS.getInstruction()); | |
+ Holders.push_back(CallInst::Create( | |
+ Func, Values, "", II->getNormalDest()->getFirstInsertionPt())); | |
+ Holders.push_back(CallInst::Create( | |
+ Func, Values, "", II->getUnwindDest()->getFirstInsertionPt())); | |
+} | |
+ | |
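+// A holder is just a call to the declared-but-never-defined vararg above, | 
+// e.g. (sketch): | 
+//   call void (...) @__tmp_use(i8 addrspace(1)* %p) | 
+// which gives liveness an obvious use of %p past the safepoint; the holders | 
+// are erased again in insertParsePoints once liveness has been recomputed. | 
+ | 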
+static void findLiveReferences( | |
+ Function &F, DominatorTree &DT, Pass *P, ArrayRef<CallSite> toUpdate, | |
+ MutableArrayRef<struct PartiallyConstructedSafepointRecord> records, | |
+ ValueSet& gc_collected_pointers) { | |
+ GCPtrLivenessData OriginalLivenessData; | |
+ computeLiveInValues(DT, F, OriginalLivenessData, gc_collected_pointers); | |
+ for (size_t i = 0; i < records.size(); i++) { | |
+ struct PartiallyConstructedSafepointRecord &info = records[i]; | |
+ const CallSite &CS = toUpdate[i]; | |
+ analyzeParsePointLiveness(DT, OriginalLivenessData, CS, info, | |
+ gc_collected_pointers); | |
+ } | |
+} | |
+ | |
+/// Remove any vector of pointers from the liveset by scalarizing them over the | |
+/// statepoint instruction. Adds the scalarized pieces to the liveset. It | |
+/// would be preferable to include the vector in the statepoint itself, but | |
+/// the lowering code currently does not handle that. Extending it would be | |
+/// slightly non-trivial since it requires a format change. Given how rare | |
+/// such cases are (for the moment?) scalarizing is an acceptable compromise. | |
+static void splitVectorValues(Instruction *StatepointInst, | |
+ StatepointLiveSetTy &LiveSet, | |
+ DominatorTree &DT) { | |
+ SmallVector<Value *, 16> ToSplit; | |
+ for (Value *V : LiveSet) | |
+ if (isa<VectorType>(V->getType())) | |
+ ToSplit.push_back(V); | |
+ | |
+ if (ToSplit.empty()) | |
+ return; | |
+ | |
+ DenseMap<Value *, SmallVector<Value *, 16>> ElementMapping; | |
+ | |
+ Function &F = *(StatepointInst->getParent()->getParent()); | |
+ | |
+ DenseMap<Value *, AllocaInst *> AllocaMap; | |
+ // First is normal return, second is exceptional return (invoke only) | |
+ DenseMap<Value *, std::pair<Value *, Value *>> Replacements; | |
+ for (Value *V : ToSplit) { | |
+ AllocaInst *Alloca = | |
+ new AllocaInst(V->getType(), "", F.getEntryBlock().getFirstNonPHI()); | |
+ AllocaMap[V] = Alloca; | |
+ | |
+ VectorType *VT = cast<VectorType>(V->getType()); | |
+ IRBuilder<> Builder(StatepointInst); | |
+ SmallVector<Value *, 16> Elements; | |
+ for (unsigned i = 0; i < VT->getNumElements(); i++) | |
+ Elements.push_back(Builder.CreateExtractElement(V, Builder.getInt32(i))); | |
+ ElementMapping[V] = Elements; | |
+ | |
+ auto InsertVectorReform = [&](Instruction *IP) { | |
+ Builder.SetInsertPoint(IP); | |
+ Builder.SetCurrentDebugLocation(IP->getDebugLoc()); | |
+ Value *ResultVec = UndefValue::get(VT); | |
+ for (unsigned i = 0; i < VT->getNumElements(); i++) | |
+ ResultVec = Builder.CreateInsertElement(ResultVec, Elements[i], | |
+ Builder.getInt32(i)); | |
+ return ResultVec; | |
+ }; | |
+ | |
+ if (isa<CallInst>(StatepointInst)) { | |
+ BasicBlock::iterator Next(StatepointInst); | |
+ Next++; | |
+ Instruction *IP = &*(Next); | |
+ Replacements[V].first = InsertVectorReform(IP); | |
+ Replacements[V].second = nullptr; | |
+ } else { | |
+ InvokeInst *Invoke = cast<InvokeInst>(StatepointInst); | |
+ // We've already normalized - check that we don't have shared destination | |
+ // blocks | |
+ BasicBlock *NormalDest = Invoke->getNormalDest(); | |
+ DCHECK(!isa<PHINode>(NormalDest->begin())); | |
+ BasicBlock *UnwindDest = Invoke->getUnwindDest(); | |
+ DCHECK(!isa<PHINode>(UnwindDest->begin())); | |
+ // Insert insert element sequences in both successors | |
+ Instruction *IP = &*(NormalDest->getFirstInsertionPt()); | |
+ Replacements[V].first = InsertVectorReform(IP); | |
+ IP = &*(UnwindDest->getFirstInsertionPt()); | |
+ Replacements[V].second = InsertVectorReform(IP); | |
+ } | |
+ } | |
+ | |
+ for (Value *V : ToSplit) { | |
+ AllocaInst *Alloca = AllocaMap[V]; | |
+ | |
+ // Capture all users before we start mutating use lists | |
+ SmallVector<Instruction *, 16> Users; | |
+ for (User *U : V->users()) | |
+ Users.push_back(cast<Instruction>(U)); | |
+ | |
+ for (Instruction *I : Users) { | |
+ if (auto Phi = dyn_cast<PHINode>(I)) { | |
+ for (unsigned i = 0; i < Phi->getNumIncomingValues(); i++) | |
+ if (V == Phi->getIncomingValue(i)) { | |
+ LoadInst *Load = new LoadInst( | |
+ Alloca, "", Phi->getIncomingBlock(i)->getTerminator()); | |
+ Phi->setIncomingValue(i, Load); | |
+ } | |
+ } else { | |
+ LoadInst *Load = new LoadInst(Alloca, "", I); | |
+ I->replaceUsesOfWith(V, Load); | |
+ } | |
+ } | |
+ | |
+ // Store the original value and the replacement value into the alloca | |
+ StoreInst *Store = new StoreInst(V, Alloca); | |
+ if (auto I = dyn_cast<Instruction>(V)) | |
+ Store->insertAfter(I); | |
+ else | |
+ Store->insertAfter(Alloca); | |
+ | |
+ // Normal return for invoke, or call return | |
+ Instruction *Replacement = cast<Instruction>(Replacements[V].first); | |
+ (new StoreInst(Replacement, Alloca))->insertAfter(Replacement); | |
+ // Unwind return for invoke only | |
+ Replacement = cast_or_null<Instruction>(Replacements[V].second); | |
+ if (Replacement) | |
+ (new StoreInst(Replacement, Alloca))->insertAfter(Replacement); | |
+ } | |
+ | |
+ // apply mem2reg to promote alloca to SSA | |
+ SmallVector<AllocaInst *, 16> Allocas; | |
+ for (Value *V : ToSplit) | |
+ Allocas.push_back(AllocaMap[V]); | |
+ PromoteMemToReg(Allocas, DT); | |
+} | |
+ | |
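+// Shape of the scalarization above for a live <2 x i8 addrspace(1)*> %v | 
+// (illustrative): | 
+//   %e0 = extractelement %v, i32 0          ; before the statepoint | 
+//   %e1 = extractelement %v, i32 1 | 
+//   ...%e0 and %e1 are now scalar live values... | 
+//   %r0 = insertelement undef, %e0, i32 0   ; reformed after the statepoint | 
+//   %r1 = insertelement %r0, %e1, i32 1 | 
+// with the alloca + mem2reg round trip above rewiring the uses of %v. | 
+ | 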
+static bool insertParsePoints(Function &F, DominatorTree &DT, Pass *P, | |
+ SmallVectorImpl<CallSite> &toUpdate, | |
+ ValueSet& gc_collected_pointers) { | |
+#ifndef NDEBUG | |
+ // sanity check the input | |
+ std::set<CallSite> uniqued; | |
+ uniqued.insert(toUpdate.begin(), toUpdate.end()); | |
+ DCHECK(uniqued.size() == toUpdate.size() && "no duplicates please!"); | |
+ | |
+ for (size_t i = 0; i < toUpdate.size(); i++) { | |
+ CallSite &CS = toUpdate[i]; | |
+ USE(CS); | |
+ DCHECK(CS.getInstruction()->getParent()->getParent() == &F); | |
+ DCHECK(isStatepoint(CS) && "expected to already be a deopt statepoint"); | |
+ } | |
+#endif | |
+ | |
+ // When inserting gc.relocates for invokes, we need to be able to insert at | |
+ // the top of the successor blocks. See the comment on | |
+ // normalizeForInvokeSafepoint for exactly what is needed. Note that this step | 
+ // may restructure the CFG. | |
+ for (CallSite CS : toUpdate) { | |
+ if (!CS.isInvoke()) | |
+ continue; | |
+ InvokeInst *invoke = cast<InvokeInst>(CS.getInstruction()); | |
+ normalizeForInvokeSafepoint(invoke->getNormalDest(), invoke->getParent(), | |
+ DT); | |
+ normalizeForInvokeSafepoint(invoke->getUnwindDest(), invoke->getParent(), | |
+ DT); | |
+ } | |
+ | |
+ // A list of dummy calls added to the IR to keep various values obviously | |
+ // live in the IR. We'll remove all of these when done. | |
+ SmallVector<CallInst *, 64> holders; | |
+ | |
+ // Insert a dummy call with all of the arguments to the vm_state we'll need | |
+ // for the actual safepoint insertion. This ensures reference arguments in | |
+ // the deopt argument list are considered live through the safepoint (and | |
+ // thus makes sure they get relocated.) | |
+ for (size_t i = 0; i < toUpdate.size(); i++) { | |
+ CallSite &CS = toUpdate[i]; | |
+ Statepoint StatepointCS(CS); | |
+ | |
+ SmallVector<Value *, 64> DeoptValues; | |
+ for (Use &U : StatepointCS.vm_state_args()) { | |
+ Value *Arg = cast<Value>(&U); | |
+ if (isHandledGCPointerType(Arg->getType())) | |
+ DeoptValues.push_back(Arg); | |
+ } | |
+ insertUseHolderAfter(CS, DeoptValues, holders); | |
+ } | |
+ | |
+ SmallVector<struct PartiallyConstructedSafepointRecord, 64> records; | |
+ records.reserve(toUpdate.size()); | |
+ for (size_t i = 0; i < toUpdate.size(); i++) { | |
+ struct PartiallyConstructedSafepointRecord info; | |
+ records.push_back(info); | |
+ } | |
+ DCHECK(records.size() == toUpdate.size()); | |
+ | |
+ // A) Identify all gc pointers which are statically live at the given call | |
+ // site. | |
+ findLiveReferences(F, DT, P, toUpdate, records, gc_collected_pointers); | |
+ | |
+ // The base phi insertion logic (for any safepoint) may have inserted new | |
+ // instructions which are now live at some safepoint. The simplest such | |
+ // example is: | |
+ // loop: | |
+ // phi a <-- will be a new base_phi here | |
+ // safepoint 1 <-- that needs to be live here | |
+ // gep a + 1 | |
+ // safepoint 2 | |
+ // br loop | |
+ // We insert some dummy calls after each safepoint to make sure the base | 
+ // pointers identified for that safepoint are definitely held live. We'll then | 
+ // ask liveness for _every_ base inserted to see what is now live. Then we | |
+ // remove the dummy calls. | |
+ holders.reserve(holders.size() + records.size()); | |
+ for (size_t i = 0; i < records.size(); i++) { | |
+ struct PartiallyConstructedSafepointRecord &info = records[i]; | |
+ CallSite &CS = toUpdate[i]; | |
+ | |
+ SmallVector<Value *, 128> Bases; | |
+ for (auto pointer: info.liveset) { | |
+ Bases.push_back(pointer); | |
+ } | |
+ insertUseHolderAfter(CS, Bases, holders); | |
+ } | |
+ | |
+ // By selecting base pointers, we've effectively inserted new uses. Thus, we | |
+ // need to rerun liveness. We may *also* have inserted new defs, but that's | |
+ // not the key issue. | |
+ recomputeLiveInValues(F, DT, P, toUpdate, records, gc_collected_pointers); | |
+ | |
+ for (size_t i = 0; i < holders.size(); i++) { | |
+ holders[i]->eraseFromParent(); | |
+ holders[i] = nullptr; | |
+ } | |
+ holders.clear(); | |
+ | |
+ // Do a limited scalarization of any live at safepoint vector values which | |
+ // contain pointers. This enables this pass to run after vectorization at | |
+ // the cost of some possible performance loss. TODO: it would be nice to | |
+ // natively support vectors all the way through the backend so we don't need | |
+ // to scalarize here. | |
+ for (size_t i = 0; i < records.size(); i++) { | |
+ struct PartiallyConstructedSafepointRecord &info = records[i]; | |
+ Instruction *statepoint = toUpdate[i].getInstruction(); | |
+ splitVectorValues(cast<Instruction>(statepoint), info.liveset, DT); | |
+ } | |
+ | |
+ // Now run through and replace the existing statepoints with new ones with | |
+ // the live variables listed. We do not yet update uses of the values being | |
+ // relocated. We have references to live variables that need to | |
+ // survive to the last iteration of this loop. (By construction, the | |
+ // previous statepoint can not be a live variable, thus we can remove | 
+ // the old statepoint calls as we go.) | |
+ for (size_t i = 0; i < records.size(); i++) { | |
+ struct PartiallyConstructedSafepointRecord &info = records[i]; | |
+ CallSite &CS = toUpdate[i]; | |
+ makeStatepointExplicit(DT, CS, P, info); | |
+ } | |
+ toUpdate.clear(); // prevent accidental use of invalid CallSites | 
+ | |
+ // Do all the fixups of the original live variables to their relocated selves | |
+ SmallVector<Value *, 128> live; | |
+ for (size_t i = 0; i < records.size(); i++) { | |
+ struct PartiallyConstructedSafepointRecord &info = records[i]; | |
+ // We can't simply save the live set from the original insertion. One of | |
+ // the live values might be the result of a call which needs a safepoint. | |
+ // That Value* no longer exists and we need to use the new gc_result. | |
+ // Thankfully, the liveset is embedded in the statepoint (and updated), so | |
+ // we just grab that. | |
+ Statepoint statepoint(info.StatepointToken); | |
+ live.insert(live.end(), statepoint.gc_args_begin(), | |
+ statepoint.gc_args_end()); | |
+#ifndef NDEBUG | |
+ // Do some basic sanity checks on our liveness results before performing | |
+ // relocation. Relocation can and will turn mistakes in liveness results | |
+ // into non-sensical code which is much harder to debug. | 
+ // TODO: It would be nice to test consistency as well | |
+ DCHECK(DT.isReachableFromEntry(info.StatepointToken->getParent()) && | |
+ "statepoint must be reachable or liveness is meaningless"); | |
+ for (Value *V : statepoint.gc_args()) { | |
+ if (!isa<Instruction>(V)) | |
+ // Non-instruction values trivially dominate all possible uses | 
+ continue; | |
+ auto LiveInst = cast<Instruction>(V); | |
+ USE(LiveInst); | |
+ DCHECK(DT.isReachableFromEntry(LiveInst->getParent()) && | |
+ "unreachable values should never be live"); | |
+ DCHECK(DT.dominates(LiveInst, info.StatepointToken) && | |
+ "basic SSA liveness expectation violated by liveness analysis"); | |
+ } | |
+#endif | |
+ } | |
+ unique_unsorted(live); | |
+ | |
+ relocationViaAlloca(F, DT, live, records); | |
+ return !records.empty(); | |
+} | |
+ | |
+// Handles both return values and arguments for Functions and CallSites. | |
+template <typename AttrHolder> | |
+static void RemoveDerefAttrAtIndex(LLVMContext &Ctx, AttrHolder &AH, | |
+ unsigned Index) { | |
+ AttrBuilder R; | |
+ if (AH.getDereferenceableBytes(Index)) | |
+ R.addAttribute(Attribute::get(Ctx, Attribute::Dereferenceable, | |
+ AH.getDereferenceableBytes(Index))); | |
+ if (AH.getDereferenceableOrNullBytes(Index)) | |
+ R.addAttribute(Attribute::get(Ctx, Attribute::DereferenceableOrNull, | |
+ AH.getDereferenceableOrNullBytes(Index))); | |
+ | |
+ if (!R.empty()) | |
+ AH.setAttributes(AH.getAttributes().removeAttributes( | |
+ Ctx, Index, AttributeSet::get(Ctx, Index, R))); | |
+} | |
+ | |
+void | |
+RewriteStatepointsForGC::stripDereferenceabilityInfoFromPrototype(Function &F) { | |
+ LLVMContext &Ctx = F.getContext(); | |
+ | |
+ for (Argument &A : F.args()) | |
+ if (isa<PointerType>(A.getType())) | |
+ RemoveDerefAttrAtIndex(Ctx, F, A.getArgNo() + 1); | |
+ | |
+ if (isa<PointerType>(F.getReturnType())) | |
+ RemoveDerefAttrAtIndex(Ctx, F, AttributeSet::ReturnIndex); | |
+} | |
+ | |
+void RewriteStatepointsForGC::stripDereferenceabilityInfoFromBody(Function &F) { | |
+ if (F.empty()) | |
+ return; | |
+ | |
+ LLVMContext &Ctx = F.getContext(); | |
+ MDBuilder Builder(Ctx); | |
+ | |
+ for (Instruction &I : instructions(F)) { | |
+ if (const MDNode *MD = I.getMetadata(LLVMContext::MD_tbaa)) { | |
+ DCHECK(MD->getNumOperands() < 5 && "unrecognized metadata shape!"); | |
+ bool IsImmutableTBAA = | |
+ MD->getNumOperands() == 4 && | |
+ mdconst::extract<ConstantInt>(MD->getOperand(3))->getValue() == 1; | |
+ | |
+ if (!IsImmutableTBAA) | |
+ continue; // no work to do, MD_tbaa is already marked mutable | |
+ | |
+ MDNode *Base = cast<MDNode>(MD->getOperand(0)); | |
+ MDNode *Access = cast<MDNode>(MD->getOperand(1)); | |
+ uint64_t Offset = | |
+ mdconst::extract<ConstantInt>(MD->getOperand(2))->getZExtValue(); | |
+ | |
+ MDNode *MutableTBAA = | |
+ Builder.createTBAAStructTagNode(Base, Access, Offset); | |
+ I.setMetadata(LLVMContext::MD_tbaa, MutableTBAA); | |
+ } | |
+ | |
+ if (CallSite CS = CallSite(&I)) { | |
+ for (int i = 0, e = CS.arg_size(); i != e; i++) | |
+ if (isa<PointerType>(CS.getArgument(i)->getType())) | |
+ RemoveDerefAttrAtIndex(Ctx, CS, i + 1); | |
+ if (isa<PointerType>(CS.getType())) | |
+ RemoveDerefAttrAtIndex(Ctx, CS, AttributeSet::ReturnIndex); | |
+ } | |
+ } | |
+} | |
+ | |
+/// Returns true if this function should be rewritten by this pass. The main | |
+/// point of this function is as an extension point for custom logic. | |
+static bool shouldRewriteStatepointsIn(Function &F) { | |
+ // TODO: This should check the GCStrategy | |
+ if (F.hasGC()) { | |
+ const char *FunctionGCName = F.getGC(); | |
+ const StringRef StatepointExampleName("statepoint-example"); | |
+ const StringRef CoreCLRName("coreclr"); | |
+ const StringRef V8GCName("v8-gc"); | |
+ return (StatepointExampleName == FunctionGCName) || | |
+ (CoreCLRName == FunctionGCName) || | |
+ (V8GCName == FunctionGCName); | |
+ } else | |
+ return false; | |
+} | |
+ | |
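+// A function opts in by naming one of these strategies, e.g. (sketch): | 
+//   define i8 addrspace(1)* @foo() gc "v8-gc" { ... } | 
+// or, when constructing the function from C++ (hypothetical setup code): | 
+//   F->setGC("v8-gc"); | 
+ | 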
+void RewriteStatepointsForGC::stripDereferenceabilityInfo(Module &M) { | |
+#ifndef NDEBUG | |
+ DCHECK(std::any_of(M.begin(), M.end(), shouldRewriteStatepointsIn) && | |
+ "precondition!"); | |
+#endif | |
+ | |
+ for (Function &F : M) | |
+ stripDereferenceabilityInfoFromPrototype(F); | |
+ | |
+ for (Function &F : M) | |
+ stripDereferenceabilityInfoFromBody(F); | |
+} | |
+ | |
+bool RewriteStatepointsForGC::runOnFunction(Function &F) { | |
+ // Nothing to do for declarations. | |
+ if (F.isDeclaration() || F.empty()) | |
+ return false; | |
+ | |
+ // Policy choice says not to rewrite - the most common reason is that we're | |
+ // compiling code without a GCStrategy. | |
+ if (!shouldRewriteStatepointsIn(F)) | |
+ return false; | |
+ | |
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>(F).getDomTree(); | |
+ | |
+ // Gather all the statepoints which need rewriting. Be careful to only | 
+ // consider those in reachable code since we need to ask dominance queries | |
+ // when rewriting. We'll delete the unreachable ones in a moment. | |
+ SmallVector<CallSite, 64> ParsePointNeeded; | |
+ bool HasUnreachableStatepoint = false; | |
+ for (Instruction &I : instructions(F)) { | |
+ // TODO: only the ones with the flag set! | |
+ if (isStatepoint(I)) { | |
+ if (DT.isReachableFromEntry(I.getParent())) | |
+ ParsePointNeeded.push_back(CallSite(&I)); | |
+ else | |
+ HasUnreachableStatepoint = true; | |
+ } | |
+ } | |
+ | |
+ bool MadeChange = false; | |
+ | |
+ // Delete any unreachable statepoints so that we don't have unrewritten | |
+ // statepoints surviving this pass. This makes testing easier and the | |
+ // resulting IR less confusing to human readers. Rather than be fancy, we | |
+ // just reuse a utility function which removes the unreachable blocks. | |
+ if (HasUnreachableStatepoint) | |
+ MadeChange |= removeUnreachableBlocks(F); | |
+ | |
+ // Return early if no work to do. | |
+ if (ParsePointNeeded.empty()) | |
+ return MadeChange; | |
+ | |
+ // As a prepass, go ahead and aggressively destroy single entry phi nodes. | |
+ // These are created by LCSSA. They have the effect of increasing the size | |
+ // of liveness sets for no good reason. It may be harder to do this post | |
+ // insertion since relocations and base phis can confuse things. | |
+ for (BasicBlock &BB : F) | |
+ if (BB.getUniquePredecessor()) { | |
+ MadeChange = true; | |
+ FoldSingleEntryPHINodes(&BB); | |
+ } | |
+ | |
+ // Before we start introducing relocations, we want to tweak the IR a bit to | |
+ // avoid unfortunate code generation effects. The main example is that we | |
+ // want to try to make sure the comparison feeding a branch is after any | |
+ // safepoints. Otherwise, we end up with a comparison of pre-relocation | |
+ // values feeding a branch after relocation. This is semantically correct, | |
+ // but results in extra register pressure since both the pre-relocation and | |
+ // post-relocation copies must be available in registers. For code without | |
+ // relocations this is handled elsewhere, but teaching the scheduler to | |
+ // reverse the transform we're about to do would be slightly complex. | |
+ // Note: This may extend the live range of the inputs to the icmp and thus | |
+ // increase the liveset of any statepoint we move over. This is profitable | |
+ // as long as all statepoints are in rare blocks. If we had in-register | |
+ // lowering for live values this would be a much safer transform. | |
+ auto getConditionInst = [](TerminatorInst *TI) -> Instruction* { | |
+ if (auto *BI = dyn_cast<BranchInst>(TI)) | |
+ if (BI->isConditional()) | |
+ return dyn_cast<Instruction>(BI->getCondition()); | |
+ // TODO: Extend this to handle switches | |
+ return nullptr; | |
+ }; | |
+ for (BasicBlock &BB : F) { | |
+ TerminatorInst *TI = BB.getTerminator(); | |
+ if (auto *Cond = getConditionInst(TI)) | |
+ // TODO: Handle more than just ICmps here. We should be able to move | |
+ // most instructions without side effects or memory access. | |
+ if (isa<ICmpInst>(Cond) && Cond->hasOneUse()) { | |
+ MadeChange = true; | |
+ Cond->moveBefore(TI); | |
+ } | |
+ } | |
+ | |
+ MadeChange |= insertParsePoints(F, DT, this, ParsePointNeeded, | |
+ gc_collected_pointers_); | |
+ return MadeChange; | |
+} | |
+ | |
+// liveness computation via standard dataflow | |
+// ------------------------------------------------------------------- | |
+ | |
+// TODO: Consider using bitvectors for liveness, the set of potentially | |
+// interesting values should be small and easy to pre-compute. | |
+ | |
+/// Compute the live-in set for the location rbegin starting from | |
+/// the live-out set of the basic block | |
+static void computeLiveInValues(BasicBlock::reverse_iterator rbegin, | |
+ BasicBlock::reverse_iterator rend, | |
+ DenseSet<Value *> &LiveTmp, | |
+ ValueSet& gc_collected_pointers) { | |
+ | |
+ for (BasicBlock::reverse_iterator ritr = rbegin; ritr != rend; ritr++) { | |
+ Instruction *I = &*ritr; | |
+ | |
+ // KILL/Def - Remove this definition from LiveIn | |
+ LiveTmp.erase(I); | |
+ | |
+ // Don't consider *uses* in PHI nodes; we handle their contribution to | 
+ // predecessor blocks when we seed the LiveOut sets | 
+ if (isa<PHINode>(I)) | |
+ continue; | |
+ | |
+ // USE - Add to the LiveIn set for this instruction | |
+ for (Value *V : I->operands()) { | |
+ if (gc_collected_pointers.count(V)) { | |
+ LiveTmp.insert(V); | |
+ } | |
+ } | |
+ } | |
+} | |
+ | |
+static void computeLiveOutSeed(BasicBlock *BB, | |
+ DenseSet<Value *> &LiveTmp, | |
+ ValueSet& gc_collected_pointers) { | |
+ | |
+ for (BasicBlock *Succ : successors(BB)) { | |
+ const BasicBlock::iterator E(Succ->getFirstNonPHI()); | |
+ for (BasicBlock::iterator I = Succ->begin(); I != E; I++) { | |
+ PHINode *Phi = cast<PHINode>(&*I); | |
+ Value *V = Phi->getIncomingValueForBlock(BB); | |
+ if (gc_collected_pointers.count(V)) | |
+ LiveTmp.insert(V); | |
+ } | |
+ } | |
+} | |
+ | |
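+// PHI uses are charged to the incoming edge: for a successor block (sketch) | 
+//   %phi = phi i8 addrspace(1)* [ %a, %bb ], [ %b, %other ] | 
+// the value %a is added to LiveOut(%bb) here, which matches the rule in | 
+// computeLiveInValues of skipping PHI uses inside the block itself. | 
+ | 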
+static DenseSet<Value *> computeKillSet(BasicBlock *BB, | |
+ ValueSet& gc_collected_pointers) { | |
+ DenseSet<Value *> KillSet; | |
+ for (Instruction &I : *BB) | |
+ if (gc_collected_pointers.count(&I)) | |
+ KillSet.insert(&I); | |
+ return KillSet; | |
+} | |
+ | |
+#ifndef NDEBUG | |
+/// Check that the items in 'Live' dominate 'TI'. This is used as a basic | |
+/// sanity check for the liveness computation. | |
+static void checkBasicSSA(DominatorTree &DT, DenseSet<Value *> &Live, | |
+ TerminatorInst *TI, bool TermOkay = false) { | |
+ for (Value *V : Live) { | |
+ if (auto *I = dyn_cast<Instruction>(V)) { | |
+ // The terminator can be a member of the LiveOut set. LLVM's definition | |
+ // of instruction dominance states that V does not dominate itself. As | |
+ // such, we need to special case this to allow it. | |
+ if (TermOkay && TI == I) | |
+ continue; | |
+ DCHECK(DT.dominates(I, TI) && | |
+ "basic SSA liveness expectation violated by liveness analysis"); | |
+ } | |
+ } | |
+} | |
+ | |
+/// Check that all the liveness sets used during the computation of liveness | |
+/// obey basic SSA properties. This is useful for finding cases where we miss | |
+/// a def. | |
+static void checkBasicSSA(DominatorTree &DT, GCPtrLivenessData &Data, | |
+ BasicBlock &BB) { | |
+ checkBasicSSA(DT, Data.LiveSet[&BB], BB.getTerminator()); | |
+ checkBasicSSA(DT, Data.LiveOut[&BB], BB.getTerminator(), true); | |
+ checkBasicSSA(DT, Data.LiveIn[&BB], BB.getTerminator()); | |
+} | |
+#endif | |
+ | |
+static void computeLiveInValues(DominatorTree &DT, Function &F, | |
+ GCPtrLivenessData &Data, | |
+ ValueSet& gc_collected_pointers) { | |
+ | |
+ SmallSetVector<BasicBlock *, 200> Worklist; | |
+ auto AddPredsToWorklist = [&](BasicBlock *BB) { | |
+ // We use a SetVector so that we don't have duplicates in the worklist. | |
+ Worklist.insert(pred_begin(BB), pred_end(BB)); | |
+ }; | |
+ auto NextItem = [&]() { | |
+ BasicBlock *BB = Worklist.back(); | |
+ Worklist.pop_back(); | |
+ return BB; | |
+ }; | |
+ | |
+ // Seed the liveness for each individual block | |
+ for (BasicBlock &BB : F) { | |
+ Data.KillSet[&BB] = computeKillSet(&BB, gc_collected_pointers); | |
+ Data.LiveSet[&BB].clear(); | |
+ computeLiveInValues(BB.rbegin(), BB.rend(), Data.LiveSet[&BB], | |
+ gc_collected_pointers); | |
+ | |
+#ifndef NDEBUG | |
+ for (Value *Kill : Data.KillSet[&BB]) { | |
+ USE(Kill); | |
+ DCHECK(!Data.LiveSet[&BB].count(Kill) && "live set contains kill"); | |
+ } | |
+#endif | |
+ | |
+ Data.LiveOut[&BB] = DenseSet<Value *>(); | |
+ computeLiveOutSeed(&BB, Data.LiveOut[&BB], gc_collected_pointers); | |
+ Data.LiveIn[&BB] = Data.LiveSet[&BB]; | |
+ set_union(Data.LiveIn[&BB], Data.LiveOut[&BB]); | |
+ set_subtract(Data.LiveIn[&BB], Data.KillSet[&BB]); | |
+ if (!Data.LiveIn[&BB].empty()) | |
+ AddPredsToWorklist(&BB); | |
+ } | |
+ | |
+ // Propagate that liveness until stable | |
+ while (!Worklist.empty()) { | |
+ BasicBlock *BB = NextItem(); | |
+ | |
+ // Compute our new liveout set, then exit early if it hasn't changed | |
+ // despite the contribution of our successors. | 
+ DenseSet<Value *> LiveOut = Data.LiveOut[BB]; | |
+ const auto OldLiveOutSize = LiveOut.size(); | |
+ for (BasicBlock *Succ : successors(BB)) { | |
+ DCHECK(Data.LiveIn.count(Succ)); | |
+ set_union(LiveOut, Data.LiveIn[Succ]); | |
+ } | |
+ // DCHECK: OldLiveOut is a subset of LiveOut | 
+ if (OldLiveOutSize == LiveOut.size()) { | |
+ // If the sets are the same size, then we didn't actually add anything | 
+ // when unioning our successors' LiveIn. Thus, the LiveOut of this block | 
+ // hasn't changed. | 
+ continue; | |
+ } | |
+ Data.LiveOut[BB] = LiveOut; | |
+ | |
+ // Apply the effects of this basic block | |
+ DenseSet<Value *> LiveTmp = LiveOut; | |
+ set_union(LiveTmp, Data.LiveSet[BB]); | |
+ set_subtract(LiveTmp, Data.KillSet[BB]); | |
+ | |
+ DCHECK(Data.LiveIn.count(BB)); | |
+ const DenseSet<Value *> &OldLiveIn = Data.LiveIn[BB]; | |
+ // DCHECK: OldLiveIn is a subset of LiveTmp | |
+ if (OldLiveIn.size() != LiveTmp.size()) { | |
+ Data.LiveIn[BB] = LiveTmp; | |
+ AddPredsToWorklist(BB); | |
+ } | |
+ } // while( !worklist.empty() ) | |
+ | |
+#ifndef NDEBUG | |
+ // Sanity check our output against SSA properties. This helps catch any | |
+ // missing kills during the above iteration. | |
+ for (BasicBlock &BB : F) { | |
+ checkBasicSSA(DT, Data, BB); | |
+ } | |
+#endif | |
+} | |
+ | |
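+// The fixed point computed above is the standard backward liveness system, | 
+// restricted to values in gc_collected_pointers (notation illustrative): | 
+//   LiveOut(BB) = union over successors S of LiveIn(S), seeded with the | 
+//                 PHI inputs that BB contributes to each S | 
+//   LiveIn(BB)  = (LiveSet(BB) | LiveOut(BB)) - KillSet(BB) | 
+// where LiveSet holds BB's upward-exposed uses and KillSet its defs. | 
+ | 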
+static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data, | |
+ StatepointLiveSetTy &Out, | |
+ ValueSet& gc_collected_pointers) { | |
+ | |
+ BasicBlock *BB = Inst->getParent(); | |
+ | |
+ // Note: The copy is intentional and required | |
+ DCHECK(Data.LiveOut.count(BB)); | |
+ DenseSet<Value *> LiveOut = Data.LiveOut[BB]; | |
+ | |
+ // We want to handle the statepoint itself oddly. Its | 
+ // call result is not live (normal), nor are its arguments | 
+ // (unless they're used again later). This adjustment is | 
+ // specifically what we need in order to relocate. | 
+ BasicBlock::reverse_iterator rend(Inst); | |
+ computeLiveInValues(BB->rbegin(), rend, LiveOut, gc_collected_pointers); | |
+ LiveOut.erase(Inst); | |
+ Out.insert(LiveOut.begin(), LiveOut.end()); | |
+} | |
+ | |
+static void recomputeLiveInValues(GCPtrLivenessData &RevisedLivenessData, | |
+ const CallSite &CS, | |
+ PartiallyConstructedSafepointRecord &Info, | |
+ ValueSet& gc_collected_pointers) { | |
+ Instruction *Inst = CS.getInstruction(); | |
+ StatepointLiveSetTy Updated; | |
+ findLiveSetAtInst(Inst, RevisedLivenessData, Updated, | |
+ gc_collected_pointers); | |
+ Info.liveset = Updated; | |
+} | |
diff --git a/src/llvm/pass-rewrite-safepoints.h b/src/llvm/pass-rewrite-safepoints.h | |
new file mode 100644 | |
index 0000000..9a64fb2 | |
--- /dev/null | |
+++ b/src/llvm/pass-rewrite-safepoints.h | |
@@ -0,0 +1,22 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#ifndef V8_PASS_REWRITE_SAFEPOINTS_H_ | |
+#define V8_PASS_REWRITE_SAFEPOINTS_H_ | |
+ | |
+#include "llvm-headers.h" | |
+ | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+using ValueSet = std::set<llvm::Value*>; | |
+ | |
+llvm::ModulePass* createRewriteStatepointsForGCPass(ValueSet&); | |
+ | |
+ | |
+} } // namespace v8::internal | |
+ | |
+ | |
+#endif /* V8_PASS_REWRITE_SAFEPOINTS_H_ */ | |
diff --git a/src/llvm/reg.h b/src/llvm/reg.h | |
new file mode 100644 | |
index 0000000..e0f6a5d | |
--- /dev/null | |
+++ b/src/llvm/reg.h | |
@@ -0,0 +1,53 @@ | |
+#include "src/x64/assembler-x64-inl.h" | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+// FIXME(llvm): our rebase to a more recent trunk has rendered | |
+// pretty much everything in here useless. So get rid of it! | |
+class StackMapReg { | |
+ public: | |
+ StackMapReg() | |
+ : index_(-1) {} | 
+ | |
+ static StackMapReg FromIndex(unsigned index) { | |
+ StackMapReg result; | |
+ result.index_ = index; | |
+ return result; | |
+ } | |
+ bool IsIntReg() { | |
+ return index_ < Register::kNumRegisters; | |
+ } | |
+ | |
+ bool IsDoubleReg() { | |
+ return index_ >= kFirstXMMRegNumber | |
+ && index_ - kFirstXMMRegNumber < XMMRegister::kMaxNumRegisters; | |
+ } | |
+ | |
+ Register IntReg() { | |
+ DCHECK(IsIntReg()); | |
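+ // The table below appears to translate stackmap (DWARF x86-64) register | 
+ // numbers into V8 Register codes, e.g. DWARF 1 (rdx) -> V8 code 2; this | 
+ // reading is inferred from the numbering and should be treated as a sketch. | 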
+ int const map[] = { 0, 2, 1, 3, 6, 7, 5, 4, 8, 9, 10, 11, 12, 13, 14, 15 }; | |
+ return Register::from_code(map[index_]); | |
+ } | |
+ | |
+ XMMRegister XMMReg() { | |
+ DCHECK(IsDoubleReg()); | |
+ return XMMRegister::from_code(index_ - kFirstXMMRegNumber); | |
+ } | |
+ | |
+ const char* ToString() { | |
+ if (IsIntReg()) | |
+ return IntReg().ToString(); | |
+ else if (IsDoubleReg()) | |
+ return XMMReg().ToString(); | |
+ else | |
+ UNREACHABLE(); | |
+ return "unknown"; | |
+ } | |
+ | |
+ private: | |
+ int index_; | |
+ static const int kFirstXMMRegNumber = 17; | |
+}; | |
+ | |
+} } // v8::internal | |
diff --git a/src/low-chunk.cc b/src/low-chunk.cc | |
new file mode 100644 | |
index 0000000..7b5fb1a | |
--- /dev/null | |
+++ b/src/low-chunk.cc | |
@@ -0,0 +1,46 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#include "low-chunk.h" | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+LowChunk::~LowChunk() {} | |
+ | |
+LowChunkBuilderBase::LowChunkBuilderBase(CompilationInfo* info, HGraph* graph) | |
+ : chunk_(nullptr), | |
+ info_(info), | |
+ graph_(graph), | |
+ status_(UNUSED), | |
+ argument_count_(0), | |
+ current_block_(nullptr), | |
+ next_block_(nullptr), | |
+ zone_(graph->zone()) {} | |
+ | |
+Isolate* LowChunkBuilderBase::isolate() const { | |
+ return graph_->isolate(); | |
+} | |
+ | |
+LowChunk::LowChunk(CompilationInfo* info, HGraph* graph) | |
+ : stability_dependencies_(8, info->zone()), | |
+ deprecation_dependencies_(32, info->zone()), | |
+ info_(info), | |
+ graph_(graph) {} | |
+ | |
+Isolate* LowChunk::isolate() const { | |
+ return graph_->isolate(); | |
+} | |
+ | |
+void LowChunkBuilderBase::Abort(BailoutReason reason) { | |
+ info()->AbortOptimization(reason); | |
+ status_ = ABORTED; | |
+} | |
+ | |
+void LowChunkBuilderBase::Retry(BailoutReason reason) { | |
+ info()->RetryOptimization(reason); | |
+ status_ = ABORTED; | |
+} | |
+ | |
+} } // namespace v8::internal | |
diff --git a/src/low-chunk.h b/src/low-chunk.h | |
new file mode 100644 | |
index 0000000..ab50870 | |
--- /dev/null | |
+++ b/src/low-chunk.h | |
@@ -0,0 +1,125 @@ | |
+// Copyright 2015 ISP RAS. All rights reserved. | |
+// Use of this source code is governed by a BSD-style license that can be | |
+// found in the LICENSE file. | |
+ | |
+#ifndef V8_LOWCHUNK_H_ | |
+#define V8_LOWCHUNK_H_ | |
+ | |
+//#include "compiler.h" | |
+#include "hydrogen.h" | |
+#include "zone-allocator.h" | |
+ | |
+ | |
+namespace v8 { | |
+namespace internal { | |
+ | |
+class LowChunk : public ZoneObject { | |
+ public: | |
+ virtual ~LowChunk(); | |
+ //virtual LowChunk* NewChunk(HGraph *graph) = 0; | |
+ virtual Handle<Code> Codegen() = 0; | |
+ Zone* zone() const { return info_->zone(); } | |
+ | |
+ CompilationInfo* info() const { return info_; } | |
+ HGraph* graph() const { return graph_; } | |
+ Isolate* isolate() const; | |
+ | |
+ void AddStabilityDependency(Handle<Map> map) { | |
+ DCHECK(map->is_stable()); | |
+ if (!map->CanTransition()) return; | |
+ DCHECK(!info()->IsStub()); | |
+ stability_dependencies_.Add(map, zone()); | |
+ } | |
+ | |
+ void AddDeprecationDependency(Handle<Map> map) { | |
+ DCHECK(!map->is_deprecated()); | |
+ if (!map->CanBeDeprecated()) return; | |
+ DCHECK(!info()->IsStub()); | |
+ deprecation_dependencies_.Add(map, zone()); | |
+ } | |
+ | |
+ protected: | |
+ LowChunk(CompilationInfo* info, HGraph* graph); | |
+ | |
+ ZoneList<Handle<Map>> stability_dependencies_; | |
+ ZoneList<Handle<Map>> deprecation_dependencies_; | |
+ | |
+ private: | |
+ CompilationInfo* info_; | |
+ HGraph* const graph_; | |
+}; | |
+ | |
+class LowChunkBuilderBase BASE_EMBEDDED { | |
+ public: | |
+ virtual ~LowChunkBuilderBase() {} // FIXME(llvm): virtuality now seems redundant | |
+ explicit LowChunkBuilderBase(CompilationInfo* info, HGraph* graph); | |
+ | |
+ void Abort(BailoutReason reason); | |
+ void Retry(BailoutReason reason); | |
+ | |
+ protected: | |
+ enum Status { UNUSED, BUILDING, DONE, ABORTED }; | |
+ | |
+ LowChunk* chunk() const { return chunk_; } | |
+ CompilationInfo* info() const { return info_; } | |
+ HGraph* graph() const { return graph_; } | |
+ Isolate* isolate() const; | |
+ Heap* heap() const { return isolate()->heap(); } | |
+ Zone* zone() const { return zone_; } | |
+ int argument_count() const { return argument_count_; } | |
+ | |
+ bool is_unused() const { return status_ == UNUSED; } | |
+ bool is_building() const { return status_ == BUILDING; } | |
+ bool is_done() const { return status_ == DONE; } | |
+ bool is_aborted() const { return status_ == ABORTED; } | |
+ | |
+ LowChunk* chunk_; | |
+ CompilationInfo* info_; | |
+ HGraph* const graph_; | |
+ Status status_; | |
+ int argument_count_; | |
+ HBasicBlock* current_block_; | |
+ HBasicBlock* next_block_; | |
+ | |
+ private: | |
+ Zone* zone_; | |
+}; | |
+ | |
+// FIXME(llvm): it seems we don't use this class at all | |
+// (it has only 1 subclass). | |
+class LowCodeGenBase BASE_EMBEDDED { | |
+ public: | |
+ LowCodeGenBase(LowChunk* chunk, CompilationInfo* info) | |
+ : chunk_(chunk), | |
+ info_(info), | |
+ zone_(info->zone()) {} | |
+ virtual ~LowCodeGenBase() {} | |
+ | |
+ LowChunk* chunk() const { return chunk_; } | |
+ HGraph* graph() const { return chunk()->graph(); } | |
+ Zone* zone() const { return zone_; } | |
+ CompilationInfo* info() const { return info_; } | |
+ Isolate* isolate() const { return info_->isolate(); } | |
+ Factory* factory() const { return isolate()->factory(); } | |
+ Heap* heap() const { return isolate()->heap(); } | |
+ | |
+ // Try to generate native code for the entire chunk, but it may fail if the | |
+ // chunk contains constructs we cannot handle. Returns true if the | |
+ // code generation attempt succeeded. | |
+ // FIXME(llvm): return this method to the child class (make non-virtual) | |
+ virtual bool GenerateCode() = 0; | |
+ | |
+ // Finish the code by setting stack height, safepoint, and bailout | |
+ // information on it. | |
+ // FIXME(llvm): return this method to the child class (make non-virtual) | |
+ // Or use it... | |
+ virtual void FinishCode(Handle<Code> code) = 0; | |
+ protected: | |
+ LowChunk* const chunk_; | |
+ CompilationInfo* const info_; | |
+ Zone* zone_; | |
+}; | |
+ | |
+} } // namespace v8::internal | |
+ | |
+#endif // V8_LOWCHUNK_H_ | |
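
The new src/low-chunk.h above is the structural core of the patch: LowChunk and LowChunkBuilderBase factor out what Lithium's chunk/builder and the new LLVM-based ones share, with Codegen() as the single entry point a backend must provide. Below is a standalone sketch of that contract, using simplified stand-in types rather than actual V8 headers (LlvmChunkLike is a hypothetical name for illustration):

#include <iostream>
#include <memory>
#include <string>

// Stand-in for v8::internal::Code; the real Codegen() returns Handle<Code>.
struct CodeLike { std::string description; };

// Mirrors the LowChunk contract: one virtual entry point per backend.
class LowChunkLike {
 public:
  virtual ~LowChunkLike() = default;
  virtual CodeLike Codegen() = 0;
};

// Hypothetical LLVM-backed subclass, standing in for the one declared in
// src/llvm/llvm-chunk.h.
class LlvmChunkLike : public LowChunkLike {
 public:
  CodeLike Codegen() override {
    // A real implementation would lower the Hydrogen graph to LLVM IR,
    // run MCJIT, and wrap the emitted machine code in a Code object.
    return CodeLike{"machine code produced via LLVM MCJIT"};
  }
};

int main() {
  std::unique_ptr<LowChunkLike> chunk = std::make_unique<LlvmChunkLike>();
  std::cout << chunk->Codegen().description << "\n";
}

Dependencies recorded through AddStabilityDependency()/AddDeprecationDependency() would presumably be committed against the generated Code object when it is installed, as with Lithium chunks.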
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc | |
index cafa4cf..e6cf170 100644 | |
--- a/src/mips/lithium-mips.cc | |
+++ b/src/mips/lithium-mips.cc | |
@@ -427,14 +427,14 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { | |
LPlatformChunk* LChunkBuilder::Build() { | |
DCHECK(is_unused()); | |
chunk_ = new(zone()) LPlatformChunk(info(), graph()); | |
- LPhase phase("L_Building chunk", chunk_); | |
+ LPhase phase("L_Building chunk", chunk()); | |
status_ = BUILDING; | |
// If compiling for OSR, reserve space for the unoptimized frame, | |
// which will be subsumed into this frame. | |
if (graph()->has_osr()) { | |
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { | |
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
} | |
} | |
@@ -446,7 +446,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
if (is_aborted()) return NULL; | |
} | |
status_ = DONE; | |
- return chunk_; | |
+ return chunk(); | |
} | |
@@ -501,40 +501,40 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) { | |
LOperand* LChunkBuilder::UseOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value); | |
} | |
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegister(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegisterAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseConstant(HValue* value) { | |
- return chunk_->DefineConstantOperand(HConstant::cast(value)); | |
+ return chunk()->DefineConstantOperand(HConstant::cast(value)); | |
} | |
LOperand* LChunkBuilder::UseAny(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); | |
} | |
@@ -709,7 +709,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, | |
bool does_deopt = false; | |
if (right_value->IsConstant()) { | |
HConstant* constant = HConstant::cast(right_value); | |
- right = chunk_->DefineConstantOperand(constant); | |
+ right = chunk()->DefineConstantOperand(constant); | |
constant_value = constant->Integer32Value() & 0x1f; | |
// Left shifts can deoptimize if we shift by > 0 and the result cannot be | |
// truncated to smi. | |
@@ -820,7 +820,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
argument_count_ = pred->argument_count(); | |
} | |
HInstruction* current = block->first(); | |
- int start = chunk_->instructions()->length(); | |
+ int start = chunk()->instructions()->length(); | |
while (current != NULL && !is_aborted()) { | |
// Code for constants in registers is generated lazily. | |
if (!current->EmitAtUses()) { | |
@@ -828,7 +828,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
} | |
current = current->next(); | |
} | |
- int end = chunk_->instructions()->length() - 1; | |
+ int end = chunk()->instructions()->length() - 1; | |
if (end >= start) { | |
block->set_first_instruction_index(start); | |
block->set_last_instruction_index(end); | |
@@ -857,7 +857,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { | |
LInstruction* dummy = | |
new(zone()) LDummyUse(UseAny(current->OperandAt(i))); | |
dummy->set_hydrogen_value(current); | |
- chunk_->AddInstruction(dummy, current_block_); | |
+ chunk()->AddInstruction(dummy, current_block_); | |
} | |
} else { | |
HBasicBlock* successor; | |
@@ -923,7 +923,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
if (FLAG_stress_environments && !instr->HasEnvironment()) { | |
instr = AssignEnvironment(instr); | |
} | |
- chunk_->AddInstruction(instr, current_block_); | |
+ chunk()->AddInstruction(instr, current_block_); | |
if (instr->IsCall() || instr->IsPrologue()) { | |
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; | |
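
The hunks above (and the matching ones for mips64, ppc, x64 and x87 below) are one mechanical refactor: every use of the raw chunk_ field becomes a call to the chunk() accessor. A plausible reading, given low-chunk.h above, is that the field now lives in the shared LowChunkBuilderBase typed as the base chunk class, so each platform builder reaches its own chunk type through a downcasting accessor. A standalone sketch of that shape (simplified types, not V8 headers):

#include <cassert>

struct LowChunkLike { virtual ~LowChunkLike() = default; };
struct PlatformChunkLike : LowChunkLike { int spill_slots = 0; };

// Mirrors LowChunkBuilderBase: the field is stored with the base type.
class BuilderBase {
 protected:
  LowChunkLike* chunk_ = nullptr;
};

// Mirrors a platform LChunkBuilder: call sites use the typed accessor.
class PlatformBuilder : public BuilderBase {
 public:
  PlatformChunkLike* chunk() const {
    return static_cast<PlatformChunkLike*>(chunk_);
  }
  void Build(PlatformChunkLike* c) {
    chunk_ = c;
    chunk()->spill_slots++;  // through chunk(), never through chunk_
  }
};

int main() {
  PlatformChunkLike c;
  PlatformBuilder b;
  b.Build(&c);
  assert(c.spill_slots == 1);
}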
diff --git a/src/mips64/lithium-mips64.cc b/src/mips64/lithium-mips64.cc | |
index 4595722..28b3058 100644 | |
--- a/src/mips64/lithium-mips64.cc | |
+++ b/src/mips64/lithium-mips64.cc | |
@@ -427,14 +427,14 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { | |
LPlatformChunk* LChunkBuilder::Build() { | |
DCHECK(is_unused()); | |
chunk_ = new(zone()) LPlatformChunk(info(), graph()); | |
- LPhase phase("L_Building chunk", chunk_); | |
+ LPhase phase("L_Building chunk", chunk_)); | |
status_ = BUILDING; | |
// If compiling for OSR, reserve space for the unoptimized frame, | |
// which will be subsumed into this frame. | |
if (graph()->has_osr()) { | |
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { | |
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
} | |
} | |
@@ -446,7 +446,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
if (is_aborted()) return NULL; | |
} | |
status_ = DONE; | |
- return chunk_; | |
+ return chunk(); | |
} | |
@@ -501,40 +501,40 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) { | |
LOperand* LChunkBuilder::UseOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value); | |
} | |
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegister(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegisterAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseConstant(HValue* value) { | |
- return chunk_->DefineConstantOperand(HConstant::cast(value)); | |
+ return chunk()->DefineConstantOperand(HConstant::cast(value)); | |
} | |
LOperand* LChunkBuilder::UseAny(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); | |
} | |
@@ -709,7 +709,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, | |
bool does_deopt = false; | |
if (right_value->IsConstant()) { | |
HConstant* constant = HConstant::cast(right_value); | |
- right = chunk_->DefineConstantOperand(constant); | |
+ right = chunk()->DefineConstantOperand(constant); | |
constant_value = constant->Integer32Value() & 0x1f; | |
// Left shifts can deoptimize if we shift by > 0 and the result cannot be | |
// truncated to smi. | |
@@ -820,7 +820,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
argument_count_ = pred->argument_count(); | |
} | |
HInstruction* current = block->first(); | |
- int start = chunk_->instructions()->length(); | |
+ int start = chunk()->instructions()->length(); | |
while (current != NULL && !is_aborted()) { | |
// Code for constants in registers is generated lazily. | |
if (!current->EmitAtUses()) { | |
@@ -828,7 +828,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
} | |
current = current->next(); | |
} | |
- int end = chunk_->instructions()->length() - 1; | |
+ int end = chunk()->instructions()->length() - 1; | |
if (end >= start) { | |
block->set_first_instruction_index(start); | |
block->set_last_instruction_index(end); | |
@@ -857,7 +857,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { | |
LInstruction* dummy = | |
new(zone()) LDummyUse(UseAny(current->OperandAt(i))); | |
dummy->set_hydrogen_value(current); | |
- chunk_->AddInstruction(dummy, current_block_); | |
+ chunk()->AddInstruction(dummy, current_block_); | |
} | |
} else { | |
HBasicBlock* successor; | |
@@ -923,7 +923,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
if (FLAG_stress_environments && !instr->HasEnvironment()) { | |
instr = AssignEnvironment(instr); | |
} | |
- chunk_->AddInstruction(instr, current_block_); | |
+ chunk()->AddInstruction(instr, current_block_); | |
if (instr->IsCall() || instr->IsPrologue()) { | |
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; | |
diff --git a/src/objects-inl.h b/src/objects-inl.h | |
index 4f2bd7a..16fdebf 100644 | |
--- a/src/objects-inl.h | |
+++ b/src/objects-inl.h | |
@@ -5067,6 +5067,16 @@ inline void Code::set_is_turbofanned(bool value) { | |
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); | |
} | |
+inline bool Code::is_llvmed() { | |
+ return IsLLVMedField::decode( | |
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); | |
+} | |
+ | |
+inline void Code::set_is_llvmed(bool value) { | |
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); | |
+ int updated = IsLLVMedField::update(previous, value); | |
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); | |
+} | |
inline bool Code::can_have_weak_objects() { | |
DCHECK(kind() == OPTIMIZED_FUNCTION); | |
@@ -5171,7 +5181,7 @@ void Code::set_builtin_index(int index) { | |
unsigned Code::stack_slots() { | |
- DCHECK(is_crankshafted()); | |
+ DCHECK(is_crankshafted() || is_llvmed()); | |
return StackSlotsField::decode( | |
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); | |
} | |
@@ -5179,7 +5189,7 @@ unsigned Code::stack_slots() { | |
void Code::set_stack_slots(unsigned slots) { | |
CHECK(slots <= (1 << kStackSlotsBitCount)); | |
- DCHECK(is_crankshafted()); | |
+ DCHECK(is_crankshafted() || is_llvmed()); | |
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); | |
int updated = StackSlotsField::update(previous, slots); | |
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); | |
@@ -5195,7 +5205,7 @@ unsigned Code::safepoint_table_offset() { | |
void Code::set_safepoint_table_offset(unsigned offset) { | |
CHECK(offset <= (1 << kSafepointTableOffsetBitCount)); | |
- DCHECK(is_crankshafted()); | |
+ DCHECK(is_crankshafted() || is_llvmed()); | |
DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize))); | |
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); | |
int updated = SafepointTableOffsetField::update(previous, offset); | |
diff --git a/src/objects.cc b/src/objects.cc | |
index 80782d8..af964dd 100644 | |
--- a/src/objects.cc | |
+++ b/src/objects.cc | |
@@ -11628,18 +11628,36 @@ void Code::Relocate(intptr_t delta) { | |
Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size()); | |
} | |
- | |
-void Code::CopyFrom(const CodeDesc& desc) { | |
+// TODO(llvm): refactor out the common part and get rid of default parameters. | |
+void Code::CopyFrom(const CodeDesc& desc, | |
+ const CodeDesc* safepoint_table_desc, | |
+ const Vector<byte>* reloc_data, | |
+ int nop_size) { | |
DCHECK(Marking::Color(this) == Marking::WHITE_OBJECT); | |
// copy code | |
CopyBytes(instruction_start(), desc.buffer, | |
static_cast<size_t>(desc.instr_size)); | |
+ if (safepoint_table_desc) { | |
+#ifdef DEBUG | |
+ // Ensure control doesn't reach this memory (it is here only for alignment). | |
+ memset(instruction_start() + desc.instr_size, 0xCC, nop_size); // int3 | |
+#endif | |
+ // copy safepoint table | |
+ CopyBytes(instruction_start() + desc.instr_size + nop_size, | |
+ safepoint_table_desc->buffer, | |
+ static_cast<size_t>(safepoint_table_desc->instr_size)); | |
+ } | |
+ | |
// copy reloc info | |
- CopyBytes(relocation_start(), | |
- desc.buffer + desc.buffer_size - desc.reloc_size, | |
- static_cast<size_t>(desc.reloc_size)); | |
+ if (!reloc_data) { | |
+ CopyBytes(relocation_start(), | |
+ desc.buffer + desc.buffer_size - desc.reloc_size, | |
+ static_cast<size_t>(desc.reloc_size)); | |
+ } else { | |
+ CopyBytes(relocation_start(), reloc_data->start(), reloc_data->length()); | |
+ } | |
// unbox handles and relocate | |
intptr_t delta = instruction_start() - desc.buffer; | |
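
The extended Code::CopyFrom above lays the externally produced safepoint table into the code object directly after the instructions, with int3 (0xCC) padding in debug builds so any stray control flow into the alignment gap traps immediately. A standalone illustration of the resulting layout arithmetic (the sizes below are invented for the example):

#include <cstdio>

int main() {
  int instr_size = 200;           // desc.instr_size
  int nop_size = 8;               // alignment gap, filled with 0xCC in debug
  int safepoint_table_size = 48;  // safepoint_table_desc->instr_size

  int table_offset = instr_size + nop_size;
  std::printf("instructions:    [0, %d)\n", instr_size);
  std::printf("int3 padding:    [%d, %d)\n", instr_size, table_offset);
  std::printf("safepoint table: [%d, %d)\n", table_offset,
              table_offset + safepoint_table_size);
  // Code::set_safepoint_table_offset(table_offset) would let
  // SafepointTable(code) locate the table header after the copy.
}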
diff --git a/src/objects.h b/src/objects.h | |
index 880fb1c..ef37177 100644 | |
--- a/src/objects.h | |
+++ b/src/objects.h | |
@@ -4882,6 +4882,12 @@ class Code: public HeapObject { | |
inline bool is_turbofanned(); | |
inline void set_is_turbofanned(bool value); | |
+ // [is_llvmed]: Tells whether the code object was generated by the LLVM MCJIT | |
+ // optimizing compiler. | |
+ // TODO(llvm): implement | |
+ inline bool is_llvmed(); | |
+ inline void set_is_llvmed(bool value); | |
+ | |
// [can_have_weak_objects]: For kind OPTIMIZED_FUNCTION, tells whether the | |
// embedded objects in code should be treated weakly. | |
inline bool can_have_weak_objects(); | |
@@ -5053,7 +5059,10 @@ class Code: public HeapObject { | |
void Relocate(intptr_t delta); | |
// Migrate code described by desc. | |
- void CopyFrom(const CodeDesc& desc); | |
+ void CopyFrom(const CodeDesc& desc, | |
+ const CodeDesc* safepoint_table_desc = nullptr, | |
+ const Vector<byte>* reloc_data = nullptr, | |
+ int nop_size = 0); | |
// Returns the object size for a given body (used for allocation). | |
static int SizeFor(int body_size) { | |
@@ -5200,7 +5209,8 @@ class Code: public HeapObject { | |
kStackSlotsFirstBit + kStackSlotsBitCount; | |
static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1; | |
static const int kIsTurbofannedBit = kMarkedForDeoptimizationBit + 1; | |
- static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1; | |
+ static const int kIsLLVMedBit = kIsTurbofannedBit + 1; | |
+ static const int kCanHaveWeakObjects = kIsLLVMedBit + 1; | |
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32); | |
STATIC_ASSERT(kCanHaveWeakObjects + 1 <= 32); | |
@@ -5213,6 +5223,8 @@ class Code: public HeapObject { | |
: public BitField<bool, kMarkedForDeoptimizationBit, 1> {}; // NOLINT | |
class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> { | |
}; // NOLINT | |
+ class IsLLVMedField : public BitField<bool, kIsLLVMedBit, 1> { | |
+ }; // NOLINT | |
class CanHaveWeakObjectsField | |
: public BitField<bool, kCanHaveWeakObjects, 1> {}; // NOLINT | |
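
The new IsLLVMed bit slots in between kIsTurbofannedBit and kCanHaveWeakObjects, shifting the latter up by one; the existing STATIC_ASSERTs keep the whole flag word within 32 bits. Below is a standalone sketch of the encoding, mimicking V8's BitField; the 24-bit stack-slots width is an assumption taken from upstream V8 of this vintage.

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};

// Bit positions chained exactly as in the header above; the stack-slots
// width (24) is an assumption, see the lead-in.
const int kStackSlotsBitCount = 24;
const int kHasFunctionCacheBit = kStackSlotsBitCount;
const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1;
const int kIsTurbofannedBit = kMarkedForDeoptimizationBit + 1;
const int kIsLLVMedBit = kIsTurbofannedBit + 1;    // new in this patch
const int kCanHaveWeakObjects = kIsLLVMedBit + 1;  // shifted up by one

using IsLLVMedField = BitField<bool, kIsLLVMedBit, 1>;

int main() {
  uint32_t flags = 0;
  flags = IsLLVMedField::update(flags, true);  // set_is_llvmed(true)
  assert(IsLLVMedField::decode(flags));        // is_llvmed()
  assert(kCanHaveWeakObjects + 1 <= 32);       // mirrors the STATIC_ASSERT
}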
diff --git a/src/ppc/lithium-ppc.cc b/src/ppc/lithium-ppc.cc | |
index 09b3976..a88a9b7 100644 | |
--- a/src/ppc/lithium-ppc.cc | |
+++ b/src/ppc/lithium-ppc.cc | |
@@ -433,14 +433,14 @@ LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { | |
LPlatformChunk* LChunkBuilder::Build() { | |
DCHECK(is_unused()); | |
chunk_ = new (zone()) LPlatformChunk(info(), graph()); | |
- LPhase phase("L_Building chunk", chunk_); | |
+ LPhase phase("L_Building chunk", chunk()); | |
status_ = BUILDING; | |
// If compiling for OSR, reserve space for the unoptimized frame, | |
// which will be subsumed into this frame. | |
if (graph()->has_osr()) { | |
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { | |
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
} | |
} | |
@@ -452,7 +452,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
if (is_aborted()) return NULL; | |
} | |
status_ = DONE; | |
- return chunk_; | |
+ return chunk(); | |
} | |
@@ -507,40 +507,40 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) { | |
LOperand* LChunkBuilder::UseOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value); | |
} | |
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegister(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegisterAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseConstant(HValue* value) { | |
- return chunk_->DefineConstantOperand(HConstant::cast(value)); | |
+ return chunk()->DefineConstantOperand(HConstant::cast(value)); | |
} | |
LOperand* LChunkBuilder::UseAny(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value, new (zone()) LUnallocated(LUnallocated::ANY)); | |
} | |
@@ -713,7 +713,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, | |
bool does_deopt = false; | |
if (right_value->IsConstant()) { | |
HConstant* constant = HConstant::cast(right_value); | |
- right = chunk_->DefineConstantOperand(constant); | |
+ right = chunk()->DefineConstantOperand(constant); | |
constant_value = constant->Integer32Value() & 0x1f; | |
// Left shifts can deoptimize if we shift by > 0 and the result cannot be | |
// truncated to smi. | |
@@ -824,7 +824,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
argument_count_ = pred->argument_count(); | |
} | |
HInstruction* current = block->first(); | |
- int start = chunk_->instructions()->length(); | |
+ int start = chunk()->instructions()->length(); | |
while (current != NULL && !is_aborted()) { | |
// Code for constants in registers is generated lazily. | |
if (!current->EmitAtUses()) { | |
@@ -832,7 +832,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
} | |
current = current->next(); | |
} | |
- int end = chunk_->instructions()->length() - 1; | |
+ int end = chunk()->instructions()->length() - 1; | |
if (end >= start) { | |
block->set_first_instruction_index(start); | |
block->set_last_instruction_index(end); | |
@@ -861,7 +861,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { | |
LInstruction* dummy = | |
new (zone()) LDummyUse(UseAny(current->OperandAt(i))); | |
dummy->set_hydrogen_value(current); | |
- chunk_->AddInstruction(dummy, current_block_); | |
+ chunk()->AddInstruction(dummy, current_block_); | |
} | |
} else { | |
HBasicBlock* successor; | |
@@ -927,7 +927,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
if (FLAG_stress_environments && !instr->HasEnvironment()) { | |
instr = AssignEnvironment(instr); | |
} | |
- chunk_->AddInstruction(instr, current_block_); | |
+ chunk()->AddInstruction(instr, current_block_); | |
if (instr->IsCall() || instr->IsPrologue()) { | |
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; | |
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc | |
index 8790da0..3454167 100644 | |
--- a/src/runtime/runtime-compiler.cc | |
+++ b/src/runtime/runtime-compiler.cc | |
@@ -280,8 +280,8 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) { | |
DeoptimizationInputData* data = | |
DeoptimizationInputData::cast(result->deoptimization_data()); | |
- if (data->OsrPcOffset()->value() >= 0) { | |
- DCHECK(BailoutId(data->OsrAstId()->value()) == ast_id); | |
+ if (data->OsrPcOffset()->value() >= 0 || result->is_llvmed()) { | |
+ // DCHECK(BailoutId(data->OsrAstId()->value()) == ast_id); | |
if (FLAG_trace_osr) { | |
PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n", | |
ast_id.ToInt(), data->OsrPcOffset()->value()); | |
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc | |
index fdf3961..7162aa2 100644 | |
--- a/src/runtime/runtime-internal.cc | |
+++ b/src/runtime/runtime-internal.cc | |
@@ -235,9 +235,10 @@ RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) { | |
RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) { | |
HandleScope scope(isolate); | |
- DCHECK(args.length() == 2); | |
- CONVERT_SMI_ARG_CHECKED(size, 0); | |
- CONVERT_SMI_ARG_CHECKED(flags, 1); | |
+ int indx1 = 0; | |
+ int indx2 = 1; | |
+ CONVERT_SMI_ARG_CHECKED(size, indx1); | |
+ CONVERT_SMI_ARG_CHECKED(flags, indx2); | |
RUNTIME_ASSERT(IsAligned(size, kPointerSize)); | |
RUNTIME_ASSERT(size > 0); | |
RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize); | |
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc | |
index 4c1c02a..60b985d 100644 | |
--- a/src/safepoint-table.cc | |
+++ b/src/safepoint-table.cc | |
@@ -35,7 +35,7 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const { | |
SafepointTable::SafepointTable(Code* code) { | |
- DCHECK(code->is_crankshafted()); | |
+ DCHECK(code->is_crankshafted() || code->is_llvmed()); | |
code_ = code; | |
Address header = code->instruction_start() + code->safepoint_table_offset(); | |
length_ = Memory::uint32_at(header + kLengthOffset); | |
@@ -100,14 +100,16 @@ void Safepoint::DefinePointerRegister(Register reg, Zone* zone) { | |
Safepoint SafepointTableBuilder::DefineSafepoint( | |
- Assembler* assembler, | |
+ unsigned pc, | |
Safepoint::Kind kind, | |
int arguments, | |
- Safepoint::DeoptMode deopt_mode) { | |
+ Safepoint::DeoptMode deopt_mode, | |
+ size_t num_function_args) { | |
DCHECK(arguments >= 0); | |
DeoptimizationInfo info; | |
- info.pc = assembler->pc_offset(); | |
+ info.pc = pc; | |
info.arguments = arguments; | |
+ info.num_function_args = num_function_args; | |
info.has_doubles = (kind & Safepoint::kWithDoubles); | |
deoptimization_info_.Add(info, zone_); | |
deopt_index_list_.Add(Safepoint::kNoDeoptimizationIndex, zone_); | |
@@ -123,6 +125,15 @@ Safepoint SafepointTableBuilder::DefineSafepoint( | |
} | |
+Safepoint SafepointTableBuilder::DefineSafepoint( | |
+ Assembler* assembler, | |
+ Safepoint::Kind kind, | |
+ int arguments, | |
+ Safepoint::DeoptMode deopt_mode) { | |
+ return DefineSafepoint(assembler->pc_offset(), kind, arguments, deopt_mode, 0); | |
+} | |
+ | |
+ | |
void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) { | |
while (last_lazy_safepoint_ < deopt_index_list_.length()) { | |
deopt_index_list_[last_lazy_safepoint_++] = index; | |
@@ -135,10 +146,14 @@ unsigned SafepointTableBuilder::GetCodeOffset() const { | |
} | |
-void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) { | |
- // Make sure the safepoint table is properly aligned. Pad with nops. | |
- assembler->Align(kIntSize); | |
- assembler->RecordComment(";;; Safepoint table."); | |
+void SafepointTableBuilder::Emit(Assembler* assembler, | |
+ int bits_per_entry, | |
+ bool for_llvmed) { | |
+ if (!for_llvmed) { | |
+ // Make sure the safepoint table is properly aligned. Pad with nops. | |
+ assembler->Align(kIntSize); | |
+ assembler->RecordComment(";;; Safepoint table."); | |
+ } | |
offset_ = assembler->pc_offset(); | |
// Take the register bits into account. | |
@@ -205,6 +220,8 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) { | |
uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info, | |
unsigned index) { | |
uint32_t encoding = SafepointEntry::DeoptimizationIndexField::encode(index); | |
+ encoding |= SafepointEntry::NumPassedArgumentsField::encode( | |
+ info.num_function_args); | |
encoding |= SafepointEntry::ArgumentsField::encode(info.arguments); | |
encoding |= SafepointEntry::SaveDoublesField::encode(info.has_doubles); | |
return encoding; | |
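
The safepoint-table.cc changes above split DefineSafepoint into a pc-based core (which also records num_function_args) and a thin Assembler-based wrapper that forwards pc_offset() and zero arguments, so existing Crankshaft call sites keep compiling while LLV8 can record safepoints at pcs taken from LLVM stackmaps. A standalone sketch of that overload split (stand-in types, not V8 headers):

#include <cstddef>
#include <cstdio>

struct AssemblerLike {
  unsigned pc_offset() const { return 42; }  // stand-in
};

// The pc-based core: this is where the entry is actually recorded.
void DefineSafepointAt(unsigned pc, std::size_t num_function_args) {
  std::printf("safepoint at pc=%u, num_function_args=%zu\n", pc,
              num_function_args);
}

// The legacy-shaped wrapper: derive pc from the assembler and pass zero
// function arguments, exactly like the forwarding overload in the patch.
void DefineSafepoint(AssemblerLike* assembler) {
  DefineSafepointAt(assembler->pc_offset(), 0);
}

int main() {
  AssemblerLike masm;
  DefineSafepoint(&masm);      // Crankshaft-style call site
  DefineSafepointAt(1024, 3);  // LLVM-stackmap-style call site
}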
diff --git a/src/safepoint-table.h b/src/safepoint-table.h | |
index fbb0152..b50a40e 100644 | |
--- a/src/safepoint-table.h | |
+++ b/src/safepoint-table.h | |
@@ -39,21 +39,42 @@ class SafepointEntry BASE_EMBEDDED { | |
return DeoptimizationIndexField::decode(info_); | |
} | |
+ // FIXME(llvm): this effectively limits the number of function arguments to 2**10. | |
+ static const int kNumPassedArgumentsFieldBits = 10; | |
static const int kArgumentsFieldBits = 3; | |
static const int kSaveDoublesFieldBits = 1; | |
static const int kDeoptIndexBits = | |
- 32 - kArgumentsFieldBits - kSaveDoublesFieldBits; | |
+ 32 - kNumPassedArgumentsFieldBits - | |
+ kArgumentsFieldBits - kSaveDoublesFieldBits; | |
+ | |
+ static const int kDeoptIndexShift = 0; | |
+ static const int kNumPassedArgumentsShift = | |
+ kDeoptIndexShift + kDeoptIndexBits; | |
+ static const int kArgumentsShift = | |
+ kNumPassedArgumentsShift + kNumPassedArgumentsFieldBits; | |
+ static const int kSaveDoublesShift = | |
+ kArgumentsShift + kArgumentsFieldBits; | |
+ | |
class DeoptimizationIndexField: | |
- public BitField<int, 0, kDeoptIndexBits> {}; // NOLINT | |
+ public BitField<int, kDeoptIndexShift, kDeoptIndexBits> {}; // NOLINT | |
+ class NumPassedArgumentsField: | |
+ public BitField<size_t, | |
+ kNumPassedArgumentsShift, | |
+ kNumPassedArgumentsFieldBits> {}; | |
class ArgumentsField: | |
public BitField<unsigned, | |
- kDeoptIndexBits, | |
+ kArgumentsShift, | |
kArgumentsFieldBits> {}; // NOLINT | |
class SaveDoublesField: | |
public BitField<bool, | |
- kDeoptIndexBits + kArgumentsFieldBits, | |
+ kSaveDoublesShift, | |
kSaveDoublesFieldBits> { }; // NOLINT | |
+ size_t num_function_args() const { | |
+ DCHECK(is_valid()); | |
+ return NumPassedArgumentsField::decode(info_); | |
+ } | |
+ | |
int argument_count() const { | |
DCHECK(is_valid()); | |
return ArgumentsField::decode(info_); | |
@@ -195,6 +216,13 @@ class SafepointTableBuilder BASE_EMBEDDED { | |
int arguments, | |
Safepoint::DeoptMode mode); | |
+ // Define a new safepoint for the given pc. | |
+ Safepoint DefineSafepoint(unsigned pc, | |
+ Safepoint::Kind kind, | |
+ int arguments, | |
+ Safepoint::DeoptMode mode, | |
+ size_t num_passed_arguments); | |
+ | |
// Record deoptimization index for lazy deoptimization for the last | |
// outstanding safepoints. | |
void RecordLazyDeoptimizationIndex(int index); | |
@@ -204,7 +232,7 @@ class SafepointTableBuilder BASE_EMBEDDED { | |
// Emit the safepoint table after the body. The number of bits per | |
// entry must be enough to hold all the pointer indexes. | |
- void Emit(Assembler* assembler, int bits_per_entry); | |
+ void Emit(Assembler* assembler, int bits_per_entry, bool for_llvmed = false); | |
private: | |
@@ -212,6 +240,7 @@ class SafepointTableBuilder BASE_EMBEDDED { | |
unsigned pc; | |
unsigned arguments; | |
bool has_doubles; | |
+ size_t num_function_args; | |
}; | |
uint32_t EncodeExceptPC(const DeoptimizationInfo& info, unsigned index); | |
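
Per the new constants above, the 32-bit safepoint entry now packs an 18-bit deoptimization index, a 10-bit passed-arguments count (hence the FIXME about the 2**10 limit), a 3-bit arguments field and a 1-bit save-doubles flag. A standalone check that the shift arithmetic fills the word exactly:

#include <cassert>

int main() {
  const int kNumPassedArgumentsFieldBits = 10;
  const int kArgumentsFieldBits = 3;
  const int kSaveDoublesFieldBits = 1;
  const int kDeoptIndexBits = 32 - kNumPassedArgumentsFieldBits -
                              kArgumentsFieldBits - kSaveDoublesFieldBits;

  const int kDeoptIndexShift = 0;
  const int kNumPassedArgumentsShift = kDeoptIndexShift + kDeoptIndexBits;
  const int kArgumentsShift =
      kNumPassedArgumentsShift + kNumPassedArgumentsFieldBits;
  const int kSaveDoublesShift = kArgumentsShift + kArgumentsFieldBits;

  assert(kDeoptIndexBits == 18);    // was 28 bits before the patch
  assert(kNumPassedArgumentsShift == 18);
  assert(kArgumentsShift == 28);
  assert(kSaveDoublesShift == 31);  // the four fields fill 32 bits exactly
}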
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h | |
index 1737658..a6b810f 100644 | |
--- a/src/x64/assembler-x64-inl.h | |
+++ b/src/x64/assembler-x64-inl.h | |
@@ -53,6 +53,16 @@ void Assembler::emitw(uint16_t x) { | |
pc_ += sizeof(uint16_t); | |
} | |
+uint32_t Assembler::GetCodeTargetIndex(Handle<Code> target) { | |
+ int current = code_targets_.length(); | |
+ if (current > 0 && code_targets_.last().is_identical_to(target)) { | |
+ // Optimization if we keep jumping to the same code target. | |
+ return current - 1; | |
+ } else { | |
+ code_targets_.Add(target); | |
+ return current; | |
+ } | |
+} | |
void Assembler::emit_code_target(Handle<Code> target, | |
RelocInfo::Mode rmode, | |
@@ -64,14 +74,8 @@ void Assembler::emit_code_target(Handle<Code> target, | |
} else { | |
RecordRelocInfo(rmode); | |
} | |
- int current = code_targets_.length(); | |
- if (current > 0 && code_targets_.last().is_identical_to(target)) { | |
- // Optimization if we keep jumping to the same code target. | |
- emitl(current - 1); | |
- } else { | |
- code_targets_.Add(target); | |
- emitl(current); | |
- } | |
+ auto code_target_index = GetCodeTargetIndex(target); | |
+ emitl(code_target_index); | |
} | |
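
GetCodeTargetIndex above factors the code-target bookkeeping out of emit_code_target while keeping the old last-entry deduplication; emit_code_target now just emits the returned index, and other callers (presumably the LLVM side) can obtain an index without emitting anything. A standalone sketch of the dedup behaviour:

#include <cassert>
#include <cstdint>
#include <vector>

using CodeHandle = const void*;  // stand-in for Handle<Code>

std::vector<CodeHandle> code_targets;

uint32_t GetCodeTargetIndex(CodeHandle target) {
  uint32_t current = static_cast<uint32_t>(code_targets.size());
  if (current > 0 && code_targets.back() == target) {
    // Optimization if we keep jumping to the same code target.
    return current - 1;
  }
  code_targets.push_back(target);
  return current;
}

int main() {
  int a = 0, b = 0;
  assert(GetCodeTargetIndex(&a) == 0);
  assert(GetCodeTargetIndex(&a) == 0);  // repeated target reuses its slot
  assert(GetCodeTargetIndex(&b) == 1);
}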
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h | |
index b12fc4c..b73f00d 100644 | |
--- a/src/x64/assembler-x64.h | |
+++ b/src/x64/assembler-x64.h | |
@@ -221,6 +221,7 @@ struct DoubleRegister { | |
bool IsAllocatable() const; | |
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; } | |
bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; } | |
+ | |
int code() const { | |
DCHECK(is_valid()); | |
return reg_code; | |
@@ -1550,6 +1551,8 @@ class Assembler : public AssemblerBase { | |
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); | |
void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2); | |
+ inline uint32_t GetCodeTargetIndex(Handle<Code> target); | |
+ | |
// Debugging | |
void Print(); | |
@@ -1624,7 +1627,6 @@ class Assembler : public AssemblerBase { | |
// code emission | |
void GrowBuffer(); | |
- | |
void emit(byte x) { *pc_++ = x; } | |
inline void emitl(uint32_t x); | |
inline void emitp(void* x, RelocInfo::Mode rmode); | |
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc | |
index 03f6fa2..1384646 100644 | |
--- a/src/x64/builtins-x64.cc | |
+++ b/src/x64/builtins-x64.cc | |
@@ -94,12 +94,19 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) { | |
} | |
+static void GenerateTailCallToReturnedCodeOpt(MacroAssembler* masm) { | |
+ __ leap(rax, FieldOperand(rax, Code::kHeaderSize)); | |
+ __ movp(rbx, Immediate(0)); | |
+ __ jmp(rax); | |
+} | |
static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { | |
__ leap(rax, FieldOperand(rax, Code::kHeaderSize)); | |
+ __ movp(rbx, Immediate(0)); | |
__ jmp(rax); | |
} | |
+ | |
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { | |
// Checking whether the queued function is ready for install is optional, | |
// since we come across interrupts and stack checks elsewhere. However, | |
@@ -844,7 +851,7 @@ static void CallCompileOptimized(MacroAssembler* masm, | |
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) { | |
CallCompileOptimized(masm, false); | |
- GenerateTailCallToReturnedCode(masm); | |
+ GenerateTailCallToReturnedCodeOpt(masm); | |
} | |
@@ -1901,17 +1908,15 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { | |
// Load deoptimization data from the code object. | |
__ movp(rbx, Operand(rax, Code::kDeoptimizationDataOffset - kHeapObjectTag)); | |
- | |
// Load the OSR entrypoint offset from the deoptimization data. | |
__ SmiToInteger32(rbx, Operand(rbx, FixedArray::OffsetOfElementAt( | |
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag)); | |
- | |
// Compute the target address = code_obj + header_size + osr_offset | |
__ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag)); | |
// Overwrite the return address on the stack. | |
__ movq(StackOperandForReturnAddress(0), rax); | |
- | |
+ __ movq(rbx, Immediate(1)); // This is an OSR entry. | |
// And "return" to the OSR entry point of the function. | |
__ ret(0); | |
} | |
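
The builtins changes above appear to establish a small entry convention in rbx: Generate_OnStackReplacement writes 1 (marking an OSR entry) before returning into optimized code, while the tail-call paths, and InvokeCode in macro-assembler-x64.cc further down, write 0. Judging by the comments in the patch, a prologue can branch on that flag; how the LLVM-generated prologue actually consumes it is not shown in this diff, so the sketch below is an assumption modelled with a plain parameter:

#include <cstdio>

// rbx is modelled as a plain boolean parameter here.
void OptimizedCodeEntry(bool is_osr_entry /* rbx */) {
  if (is_osr_entry) {
    // An OSR-aware prologue would branch here; details are backend-specific.
    std::printf("OSR entry: adopt the existing unoptimized frame\n");
  } else {
    std::printf("normal entry: build a fresh optimized frame\n");
  }
}

int main() {
  OptimizedCodeEntry(false);  // GenerateTailCallToReturnedCode*: rbx = 0
  OptimizedCodeEntry(true);   // Generate_OnStackReplacement:     rbx = 1
}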
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc | |
index b083f8a..6ed4a5f 100644 | |
--- a/src/x64/lithium-codegen-x64.cc | |
+++ b/src/x64/lithium-codegen-x64.cc | |
@@ -458,23 +458,23 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | |
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const { | |
- return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); | |
+ return chunk()->LookupLiteralRepresentation(op).IsSmiOrInteger32(); | |
} | |
bool LCodeGen::IsExternalConstant(LConstantOperand* op) const { | |
- return chunk_->LookupLiteralRepresentation(op).IsExternal(); | |
+ return chunk()->LookupLiteralRepresentation(op).IsExternal(); | |
} | |
bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const { | |
return op->IsConstantOperand() && | |
- chunk_->IsDehoistedKey(chunk_->LookupConstant(op)); | |
+ chunk()->IsDehoistedKey(chunk()->LookupConstant(op)); | |
} | |
bool LCodeGen::IsSmiConstant(LConstantOperand* op) const { | |
- return chunk_->LookupLiteralRepresentation(op).IsSmi(); | |
+ return chunk()->LookupLiteralRepresentation(op).IsSmi(); | |
} | |
@@ -485,7 +485,7 @@ int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { | |
int32_t LCodeGen::ToRepresentation(LConstantOperand* op, | |
const Representation& r) const { | |
- HConstant* constant = chunk_->LookupConstant(op); | |
+ HConstant* constant = chunk()->LookupConstant(op); | |
int32_t value = constant->Integer32Value(); | |
if (r.IsInteger32()) return value; | |
DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged()); | |
@@ -494,28 +494,28 @@ int32_t LCodeGen::ToRepresentation(LConstantOperand* op, | |
Smi* LCodeGen::ToSmi(LConstantOperand* op) const { | |
- HConstant* constant = chunk_->LookupConstant(op); | |
+ HConstant* constant = chunk()->LookupConstant(op); | |
return Smi::FromInt(constant->Integer32Value()); | |
} | |
double LCodeGen::ToDouble(LConstantOperand* op) const { | |
- HConstant* constant = chunk_->LookupConstant(op); | |
+ HConstant* constant = chunk()->LookupConstant(op); | |
DCHECK(constant->HasDoubleValue()); | |
return constant->DoubleValue(); | |
} | |
ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const { | |
- HConstant* constant = chunk_->LookupConstant(op); | |
+ HConstant* constant = chunk()->LookupConstant(op); | |
DCHECK(constant->HasExternalReferenceValue()); | |
return constant->ExternalReferenceValue(); | |
} | |
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { | |
- HConstant* constant = chunk_->LookupConstant(op); | |
- DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); | |
+ HConstant* constant = chunk()->LookupConstant(op); | |
+ DCHECK(chunk()->LookupLiteralRepresentation(op).IsSmiOrTagged()); | |
return constant->handle(isolate()); | |
} | |
@@ -684,7 +684,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) { | |
__ movp(rsi, ToOperand(context)); | |
} else if (context->IsConstantOperand()) { | |
HConstant* constant = | |
- chunk_->LookupConstant(LConstantOperand::cast(context)); | |
+ chunk()->LookupConstant(LConstantOperand::cast(context)); | |
__ Move(rsi, Handle<Object>::cast(constant->handle(isolate()))); | |
} else { | |
UNREACHABLE(); | |
@@ -1005,6 +1005,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) { | |
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { | |
+ // __ int3(); | |
GenerateOsrPrologue(); | |
} | |
@@ -1051,7 +1052,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { | |
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero); | |
return; | |
} | |
- | |
+ | |
__ TruncatingDiv(dividend, Abs(divisor)); | |
__ imull(rdx, rdx, Immediate(Abs(divisor))); | |
__ movl(rax, dividend); | |
@@ -1071,7 +1072,6 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { | |
void LCodeGen::DoModI(LModI* instr) { | |
HMod* hmod = instr->hydrogen(); | |
- | |
Register left_reg = ToRegister(instr->left()); | |
DCHECK(left_reg.is(rax)); | |
Register right_reg = ToRegister(instr->right()); | |
@@ -2073,21 +2073,21 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | |
template<class InstrType> | |
void LCodeGen::EmitBranch(InstrType instr, Condition cc) { | |
- int left_block = instr->TrueDestination(chunk_); | |
- int right_block = instr->FalseDestination(chunk_); | |
+ int left_block = instr->TrueDestination(chunk()); | |
+ int right_block = instr->FalseDestination(chunk()); | |
int next_block = GetNextEmittedBlock(); | |
if (right_block == left_block || cc == no_condition) { | |
EmitGoto(left_block); | |
} else if (left_block == next_block) { | |
- __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block)); | |
+ __ j(NegateCondition(cc), chunk()->GetAssemblyLabel(right_block)); | |
} else if (right_block == next_block) { | |
- __ j(cc, chunk_->GetAssemblyLabel(left_block)); | |
+ __ j(cc, chunk()->GetAssemblyLabel(left_block)); | |
} else { | |
- __ j(cc, chunk_->GetAssemblyLabel(left_block)); | |
+ __ j(cc, chunk()->GetAssemblyLabel(left_block)); | |
if (cc != always) { | |
- __ jmp(chunk_->GetAssemblyLabel(right_block)); | |
+ __ jmp(chunk()->GetAssemblyLabel(right_block)); | |
} | |
} | |
} | |
@@ -2095,15 +2095,15 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) { | |
template <class InstrType> | |
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) { | |
- int true_block = instr->TrueDestination(chunk_); | |
- __ j(cc, chunk_->GetAssemblyLabel(true_block)); | |
+ int true_block = instr->TrueDestination(chunk()); | |
+ __ j(cc, chunk()->GetAssemblyLabel(true_block)); | |
} | |
template <class InstrType> | |
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { | |
- int false_block = instr->FalseDestination(chunk_); | |
- __ j(cc, chunk_->GetAssemblyLabel(false_block)); | |
+ int false_block = instr->FalseDestination(chunk()); | |
+ __ j(cc, chunk()->GetAssemblyLabel(false_block)); | |
} | |
@@ -2164,27 +2164,27 @@ void LCodeGen::DoBranch(LBranch* instr) { | |
if (expected.Contains(ToBooleanStub::UNDEFINED)) { | |
// undefined -> false. | |
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex); | |
- __ j(equal, instr->FalseLabel(chunk_)); | |
+ __ j(equal, instr->FalseLabel(chunk())); | |
} | |
if (expected.Contains(ToBooleanStub::BOOLEAN)) { | |
// true -> true. | |
__ CompareRoot(reg, Heap::kTrueValueRootIndex); | |
- __ j(equal, instr->TrueLabel(chunk_)); | |
+ __ j(equal, instr->TrueLabel(chunk())); | |
// false -> false. | |
__ CompareRoot(reg, Heap::kFalseValueRootIndex); | |
- __ j(equal, instr->FalseLabel(chunk_)); | |
+ __ j(equal, instr->FalseLabel(chunk())); | |
} | |
if (expected.Contains(ToBooleanStub::NULL_TYPE)) { | |
// 'null' -> false. | |
__ CompareRoot(reg, Heap::kNullValueRootIndex); | |
- __ j(equal, instr->FalseLabel(chunk_)); | |
+ __ j(equal, instr->FalseLabel(chunk())); | |
} | |
if (expected.Contains(ToBooleanStub::SMI)) { | |
// Smis: 0 -> false, all other -> true. | |
__ Cmp(reg, Smi::FromInt(0)); | |
- __ j(equal, instr->FalseLabel(chunk_)); | |
- __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | |
+ __ j(equal, instr->FalseLabel(chunk())); | |
+ __ JumpIfSmi(reg, instr->TrueLabel(chunk())); | |
} else if (expected.NeedsMap()) { | |
// If we need a map later and have a Smi -> deopt. | |
__ testb(reg, Immediate(kSmiTagMask)); | |
@@ -2199,14 +2199,14 @@ void LCodeGen::DoBranch(LBranch* instr) { | |
// Undetectable -> false. | |
__ testb(FieldOperand(map, Map::kBitFieldOffset), | |
Immediate(1 << Map::kIsUndetectable)); | |
- __ j(not_zero, instr->FalseLabel(chunk_)); | |
+ __ j(not_zero, instr->FalseLabel(chunk())); | |
} | |
} | |
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { | |
// spec object -> true. | |
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE); | |
- __ j(above_equal, instr->TrueLabel(chunk_)); | |
+ __ j(above_equal, instr->TrueLabel(chunk())); | |
} | |
if (expected.Contains(ToBooleanStub::STRING)) { | |
@@ -2215,21 +2215,21 @@ void LCodeGen::DoBranch(LBranch* instr) { | |
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE); | |
__ j(above_equal, ¬_string, Label::kNear); | |
__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0)); | |
- __ j(not_zero, instr->TrueLabel(chunk_)); | |
- __ jmp(instr->FalseLabel(chunk_)); | |
+ __ j(not_zero, instr->TrueLabel(chunk())); | |
+ __ jmp(instr->FalseLabel(chunk())); | |
__ bind(¬_string); | |
} | |
if (expected.Contains(ToBooleanStub::SYMBOL)) { | |
// Symbol value -> true. | |
__ CmpInstanceType(map, SYMBOL_TYPE); | |
- __ j(equal, instr->TrueLabel(chunk_)); | |
+ __ j(equal, instr->TrueLabel(chunk())); | |
} | |
if (expected.Contains(ToBooleanStub::SIMD_VALUE)) { | |
// SIMD value -> true. | |
__ CmpInstanceType(map, SIMD128_VALUE_TYPE); | |
- __ j(equal, instr->TrueLabel(chunk_)); | |
+ __ j(equal, instr->TrueLabel(chunk())); | |
} | |
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | |
@@ -2240,8 +2240,8 @@ void LCodeGen::DoBranch(LBranch* instr) { | |
XMMRegister xmm_scratch = double_scratch0(); | |
__ xorps(xmm_scratch, xmm_scratch); | |
__ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); | |
- __ j(zero, instr->FalseLabel(chunk_)); | |
- __ jmp(instr->TrueLabel(chunk_)); | |
+ __ j(zero, instr->FalseLabel(chunk())); | |
+ __ jmp(instr->TrueLabel(chunk())); | |
__ bind(¬_heap_number); | |
} | |
@@ -2257,7 +2257,7 @@ void LCodeGen::DoBranch(LBranch* instr) { | |
void LCodeGen::EmitGoto(int block) { | |
if (!IsNextEmittedBlock(block)) { | |
- __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); | |
+ __ jmp(chunk()->GetAssemblyLabel(chunk()->LookupDestination(block))); | |
} | |
} | |
@@ -2313,14 +2313,14 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { | |
double left_val = ToDouble(LConstantOperand::cast(left)); | |
double right_val = ToDouble(LConstantOperand::cast(right)); | |
int next_block = EvalComparison(instr->op(), left_val, right_val) ? | |
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); | |
+ instr->TrueDestination(chunk()) : instr->FalseDestination(chunk()); | |
EmitGoto(next_block); | |
} else { | |
if (instr->is_double()) { | |
// Don't base result on EFLAGS when a NaN is involved. Instead | |
// jump to the false block. | |
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); | |
- __ j(parity_even, instr->FalseLabel(chunk_)); | |
+ __ j(parity_even, instr->FalseLabel(chunk())); | |
} else { | |
int32_t value; | |
if (right->IsConstantOperand()) { | |
@@ -2450,7 +2450,7 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { | |
? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | |
Condition true_cond = EmitIsString( | |
- reg, temp, instr->FalseLabel(chunk_), check_needed); | |
+ reg, temp, instr->FalseLabel(chunk()), check_needed); | |
EmitBranch(instr, true_cond); | |
} | |
@@ -2474,7 +2474,7 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { | |
Register temp = ToRegister(instr->temp()); | |
if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
- __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
+ __ JumpIfSmi(input, instr->FalseLabel(chunk())); | |
} | |
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset)); | |
__ testb(FieldOperand(temp, Map::kBitFieldOffset), | |
@@ -2520,7 +2520,7 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { | |
Register input = ToRegister(instr->value()); | |
if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
- __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
+ __ JumpIfSmi(input, instr->FalseLabel(chunk())); | |
} | |
__ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister); | |
@@ -2624,7 +2624,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { | |
Register temp2 = ToRegister(instr->temp2()); | |
Handle<String> class_name = instr->hydrogen()->class_name(); | |
- EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), | |
+ EmitClassOfTest(instr->TrueLabel(chunk()), instr->FalseLabel(chunk()), | |
class_name, input, temp, temp2); | |
EmitBranch(instr, equal); | |
@@ -3084,7 +3084,6 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | |
bool requires_hole_check = hinstr->RequiresHoleCheck(); | |
Representation representation = hinstr->representation(); | |
int offset = instr->base_offset(); | |
- | |
if (kPointerSize == kInt32Size && !key->IsConstantOperand() && | |
instr->hydrogen()->IsDehoisted()) { | |
// Sign extend key because it could be a 32 bit negative value | |
@@ -3110,7 +3109,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | |
DCHECK(kSmiTagSize + kSmiShiftSize == 32); | |
offset += kPointerSize / 2; | |
} | |
- | |
+ | |
__ Load(result, | |
BuildFastArrayOperand(instr->elements(), key, | |
instr->hydrogen()->key()->representation(), | |
@@ -3955,7 +3954,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { | |
__ jmp(&done, Label::kNear); | |
__ bind(&packed_case); | |
} | |
- | |
+ | |
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); | |
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); | |
__ bind(&done); | |
@@ -4122,7 +4121,6 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | |
DCHECK(ToRegister(instr->context()).is(rsi)); | |
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); | |
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); | |
- | |
if (instr->hydrogen()->HasVectorAndSlot()) { | |
EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr); | |
} | |
@@ -5369,7 +5367,6 @@ void LCodeGen::DoAllocate(LAllocate* instr) { | |
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation()); | |
flags = static_cast<AllocationFlags>(flags | PRETENURE); | |
} | |
- | |
if (instr->size()->IsConstantOperand()) { | |
int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | |
if (size <= Page::kMaxRegularHeapObjectSize) { | |
@@ -5430,7 +5427,6 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { | |
flags = AllocateTargetSpace::update(flags, NEW_SPACE); | |
} | |
__ Push(Smi::FromInt(flags)); | |
- | |
CallRuntimeFromDeferred( | |
Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); | |
__ StoreToSafepointRegisterSlot(result, rax); | |
@@ -5532,11 +5528,11 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { | |
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) { | |
- Label* true_label = instr->TrueLabel(chunk_); | |
- Label* false_label = instr->FalseLabel(chunk_); | |
+ Label* true_label = instr->TrueLabel(chunk()); | |
+ Label* false_label = instr->FalseLabel(chunk()); | |
Handle<String> type_name = instr->type_literal(); | |
- int left_block = instr->TrueDestination(chunk_); | |
- int right_block = instr->FalseDestination(chunk_); | |
+ int left_block = instr->TrueDestination(chunk()); | |
+ int right_block = instr->FalseDestination(chunk()); | |
int next_block = GetNextEmittedBlock(); | |
Label::Distance true_distance = left_block == next_block ? Label::kNear | |
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h | |
index d5ca006..4d83fc5 100644 | |
--- a/src/x64/lithium-codegen-x64.h | |
+++ b/src/x64/lithium-codegen-x64.h | |
@@ -73,14 +73,8 @@ class LCodeGen: public LCodeGenBase { | |
Handle<Object> ToHandle(LConstantOperand* op) const; | |
Operand ToOperand(LOperand* op) const; | |
- // Try to generate code for the entire chunk, but it may fail if the | |
- // chunk contains constructs we cannot handle. Returns true if the | |
- // code generation attempt succeeded. | |
- bool GenerateCode(); | |
- | |
- // Finish the code by setting stack height, safepoint, and bailout | |
- // information on it. | |
- void FinishCode(Handle<Code> code); | |
+ bool GenerateCode() override; | |
+ void FinishCode(Handle<Code> code) override; | |
// Deferred code support. | |
void DoDeferredNumberTagD(LNumberTagD* instr); | |
@@ -119,9 +113,7 @@ class LCodeGen: public LCodeGenBase { | |
private: | |
LanguageMode language_mode() const { return info()->language_mode(); } | |
- LPlatformChunk* chunk() const { return chunk_; } | |
Scope* scope() const { return scope_; } | |
- HGraph* graph() const { return chunk()->graph(); } | |
XMMRegister double_scratch0() const { return xmm0; } | |
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc | |
index 722ab01..f80ce96 100644 | |
--- a/src/x64/lithium-x64.cc | |
+++ b/src/x64/lithium-x64.cc | |
@@ -446,14 +446,14 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) { | |
LPlatformChunk* LChunkBuilder::Build() { | |
DCHECK(is_unused()); | |
chunk_ = new(zone()) LPlatformChunk(info(), graph()); | |
- LPhase phase("L_Building chunk", chunk_); | |
+ LPhase phase("L_Building chunk", chunk()); | |
status_ = BUILDING; | |
// If compiling for OSR, reserve space for the unoptimized frame, | |
// which will be subsumed into this frame. | |
if (graph()->has_osr()) { | |
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { | |
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
} | |
} | |
@@ -465,7 +465,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
if (is_aborted()) return NULL; | |
} | |
status_ = DONE; | |
- return chunk_; | |
+ return chunk(); | |
} | |
@@ -509,7 +509,7 @@ LOperand* LChunkBuilder::UseTempRegister(HValue* value) { | |
LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseTempRegister(value); | |
} | |
@@ -527,40 +527,40 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) { | |
LOperand* LChunkBuilder::UseOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value); | |
} | |
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegister(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegisterAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseConstant(HValue* value) { | |
- return chunk_->DefineConstantOperand(HConstant::cast(value)); | |
+ return chunk()->DefineConstantOperand(HConstant::cast(value)); | |
} | |
LOperand* LChunkBuilder::UseAny(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); | |
} | |
@@ -724,7 +724,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, | |
bool does_deopt = false; | |
if (right_value->IsConstant()) { | |
HConstant* constant = HConstant::cast(right_value); | |
- right = chunk_->DefineConstantOperand(constant); | |
+ right = chunk()->DefineConstantOperand(constant); | |
constant_value = constant->Integer32Value() & 0x1f; | |
if (SmiValuesAre31Bits() && instr->representation().IsSmi() && | |
constant_value > 0) { | |
@@ -834,7 +834,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
argument_count_ = pred->argument_count(); | |
} | |
HInstruction* current = block->first(); | |
- int start = chunk_->instructions()->length(); | |
+ int start = chunk()->instructions()->length(); | |
while (current != NULL && !is_aborted()) { | |
// Code for constants in registers is generated lazily. | |
if (!current->EmitAtUses()) { | |
@@ -842,7 +842,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
} | |
current = current->next(); | |
} | |
- int end = chunk_->instructions()->length() - 1; | |
+ int end = chunk()->instructions()->length() - 1; | |
if (end >= start) { | |
block->set_first_instruction_index(start); | |
block->set_last_instruction_index(end); | |
@@ -871,7 +871,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { | |
LInstruction* dummy = | |
new(zone()) LDummyUse(UseAny(current->OperandAt(i))); | |
dummy->set_hydrogen_value(current); | |
- chunk_->AddInstruction(dummy, current_block_); | |
+ chunk()->AddInstruction(dummy, current_block_); | |
} | |
} else { | |
HBasicBlock* successor; | |
@@ -937,7 +937,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
if (FLAG_stress_environments && !instr->HasEnvironment()) { | |
instr = AssignEnvironment(instr); | |
} | |
- chunk_->AddInstruction(instr, current_block_); | |
+ chunk()->AddInstruction(instr, current_block_); | |
if (instr->IsCall() || instr->IsPrologue()) { | |
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; | |
@@ -948,7 +948,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
} | |
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); | |
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); | |
- chunk_->AddInstruction(bailout, current_block_); | |
+ chunk()->AddInstruction(bailout, current_block_); | |
} | |
} | |
@@ -2188,7 +2188,7 @@ void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) { | |
// points and should not invoke this function. We can't use STATIC_ASSERT | |
// here as the pointer size is 32-bit for x32. | |
DCHECK(kPointerSize == kInt64Size); | |
- BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds(); | |
+ BitVector* dehoisted_key_ids = chunk()->GetDehoistedKeyIds(); | |
if (dehoisted_key_ids->Contains(candidate->id())) return; | |
dehoisted_key_ids->Add(candidate->id()); | |
if (!candidate->IsPhi()) return; | |
@@ -2439,9 +2439,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { | |
} else { | |
val = UseRegister(instr->value()); | |
} | |
- | |
// We only need a scratch register if we have a write barrier or we | |
// have a store into the properties array (not in-object-property). | |
+ | |
LOperand* temp = (!is_in_object || needs_write_barrier || | |
needs_write_barrier_for_map) ? TempRegister() : NULL; | |
@@ -2671,7 +2671,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { | |
inner->BindContext(instr->closure_context()); | |
inner->set_entry(instr); | |
current_block_->UpdateEnvironment(inner); | |
- chunk_->AddInlinedFunction(instr->shared()); | |
+ chunk()->AddInlinedFunction(instr->shared()); | |
return NULL; | |
} | |
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h | |
index 42d5a54..8d0da30 100644 | |
--- a/src/x64/lithium-x64.h | |
+++ b/src/x64/lithium-x64.h | |
@@ -2730,10 +2730,8 @@ class LChunkBuilder final : public LChunkBuilderBase { | |
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) | |
: LChunkBuilderBase(info, graph), | |
current_instruction_(NULL), | |
- current_block_(NULL), | |
- next_block_(NULL), | |
allocator_(allocator) {} | |
- | |
+ ~LChunkBuilder() override {} | |
// Build the sequence for the graph. | |
LPlatformChunk* Build(); | |
@@ -2854,8 +2852,6 @@ class LChunkBuilder final : public LChunkBuilderBase { | |
void FindDehoistedKeyDefinitions(HValue* candidate); | |
HInstruction* current_instruction_; | |
- HBasicBlock* current_block_; | |
- HBasicBlock* next_block_; | |
LAllocator* allocator_; | |
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder); | |
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc | |
index bde32be..32307cf 100644 | |
--- a/src/x64/macro-assembler-x64.cc | |
+++ b/src/x64/macro-assembler-x64.cc | |
@@ -3479,6 +3479,7 @@ void MacroAssembler::InvokeCode(Register code, | |
call_wrapper.AfterCall(); | |
} else { | |
DCHECK(flag == JUMP_FUNCTION); | |
+ movp(rbx, Immediate(0)); // Not an OSR entry. | |
jmp(code); | |
} | |
bind(&done); | |
diff --git a/src/x87/lithium-x87.cc b/src/x87/lithium-x87.cc | |
index cb429b2..b73fae5 100644 | |
--- a/src/x87/lithium-x87.cc | |
+++ b/src/x87/lithium-x87.cc | |
@@ -462,12 +462,12 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) { | |
LPlatformChunk* LChunkBuilder::Build() { | |
DCHECK(is_unused()); | |
chunk_ = new(zone()) LPlatformChunk(info(), graph()); | |
- LPhase phase("L_Building chunk", chunk_); | |
+ LPhase phase("L_Building chunk", chunk()); | |
status_ = BUILDING; | |
// Reserve the first spill slot for the state of dynamic alignment. | |
if (info()->IsOptimizing()) { | |
- int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ int alignment_state_index = chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
DCHECK_EQ(alignment_state_index, 0); | |
USE(alignment_state_index); | |
} | |
@@ -476,7 +476,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
// which will be subsumed into this frame. | |
if (graph()->has_osr()) { | |
for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { | |
- chunk_->GetNextSpillIndex(GENERAL_REGISTERS); | |
+ chunk()->GetNextSpillIndex(GENERAL_REGISTERS); | |
} | |
} | |
@@ -488,7 +488,7 @@ LPlatformChunk* LChunkBuilder::Build() { | |
if (is_aborted()) return NULL; | |
} | |
status_ = DONE; | |
- return chunk_; | |
+ return chunk(); | |
} | |
@@ -544,14 +544,14 @@ static inline bool CanBeImmediateConstant(HValue* value) { | |
LOperand* LChunkBuilder::UseOrConstant(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value); | |
} | |
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseAtStart(value); | |
} | |
@@ -559,33 +559,33 @@ LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { | |
LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value, | |
Register fixed_register) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseFixed(value, fixed_register); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegister(value); | |
} | |
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { | |
return CanBeImmediateConstant(value) | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: UseRegisterAtStart(value); | |
} | |
LOperand* LChunkBuilder::UseConstant(HValue* value) { | |
- return chunk_->DefineConstantOperand(HConstant::cast(value)); | |
+ return chunk()->DefineConstantOperand(HConstant::cast(value)); | |
} | |
LOperand* LChunkBuilder::UseAny(HValue* value) { | |
return value->IsConstant() | |
- ? chunk_->DefineConstantOperand(HConstant::cast(value)) | |
+ ? chunk()->DefineConstantOperand(HConstant::cast(value)) | |
: Use(value, new(zone()) LUnallocated(LUnallocated::ANY)); | |
} | |
@@ -742,7 +742,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, | |
bool does_deopt = false; | |
if (right_value->IsConstant()) { | |
HConstant* constant = HConstant::cast(right_value); | |
- right = chunk_->DefineConstantOperand(constant); | |
+ right = chunk()->DefineConstantOperand(constant); | |
constant_value = constant->Integer32Value() & 0x1f; | |
// Left shifts can deoptimize if we shift by > 0 and the result cannot be | |
// truncated to smi. | |
@@ -850,7 +850,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
argument_count_ = pred->argument_count(); | |
} | |
HInstruction* current = block->first(); | |
- int start = chunk_->instructions()->length(); | |
+ int start = chunk()->instructions()->length(); | |
while (current != NULL && !is_aborted()) { | |
// Code for constants in registers is generated lazily. | |
if (!current->EmitAtUses()) { | |
@@ -858,7 +858,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { | |
} | |
current = current->next(); | |
} | |
- int end = chunk_->instructions()->length() - 1; | |
+ int end = chunk()->instructions()->length() - 1; | |
if (end >= start) { | |
block->set_first_instruction_index(start); | |
block->set_last_instruction_index(end); | |
@@ -887,7 +887,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { | |
LInstruction* dummy = | |
new(zone()) LDummyUse(UseAny(current->OperandAt(i))); | |
dummy->set_hydrogen_value(current); | |
- chunk_->AddInstruction(dummy, current_block_); | |
+ chunk()->AddInstruction(dummy, current_block_); | |
} | |
} else { | |
HBasicBlock* successor; | |
@@ -900,7 +900,7 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { | |
if (!current->IsGoto()) { | |
LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate()); | |
clobber->set_hydrogen_value(current); | |
- chunk_->AddInstruction(clobber, current_block_); | |
+ chunk()->AddInstruction(clobber, current_block_); | |
} | |
instr = new(zone()) LGoto(successor); | |
} else { | |
@@ -969,9 +969,9 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
// insert a fpu register barrier right before. | |
LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate()); | |
clobber->set_hydrogen_value(hydrogen_val); | |
- chunk_->AddInstruction(clobber, current_block_); | |
+ chunk()->AddInstruction(clobber, current_block_); | |
} | |
- chunk_->AddInstruction(instr, current_block_); | |
+ chunk()->AddInstruction(instr, current_block_); | |
if (instr->IsCall() || instr->IsPrologue()) { | |
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val; | |
@@ -982,7 +982,7 @@ void LChunkBuilder::AddInstruction(LInstruction* instr, | |
} | |
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout()); | |
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout); | |
- chunk_->AddInstruction(bailout, current_block_); | |
+ chunk()->AddInstruction(bailout, current_block_); | |
} | |
} | |
@@ -2691,7 +2691,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { | |
inner->BindContext(instr->closure_context()); | |
inner->set_entry(instr); | |
current_block_->UpdateEnvironment(inner); | |
- chunk_->AddInlinedFunction(instr->shared()); | |
+ chunk()->AddInlinedFunction(instr->shared()); | |
return NULL; | |
} | |
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp | |
index c99495a..48aa65a 100644 | |
--- a/tools/gyp/v8.gyp | |
+++ b/tools/gyp/v8.gyp | |
@@ -35,6 +35,7 @@ | |
'v8_extra_library_files%': [], | |
'v8_experimental_extra_library_files%': [], | |
'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)', | |
+ 'llvm_config': 'llvm-config', | |
}, | |
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'], | |
'targets': [ | |
@@ -81,7 +82,7 @@ | |
'conditions': [ | |
['OS=="mac"', { | |
'xcode_settings': { | |
- 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load'] | |
+ 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load', "-Hello",] # TODO(llvm): triggers later | |
}, | |
}], | |
['soname_version!=""', { | |
@@ -369,11 +370,29 @@ | |
}, | |
'include_dirs+': [ | |
'../..', | |
+ '<!@(<(llvm_config) --includedir)', | |
+ #'asdfasdf', TODO(llvm): this works! | |
], | |
'defines': [ | |
# TODO(jochen): Remove again after this is globally turned on. | |
'V8_IMMINENT_DEPRECATION_WARNINGS', | |
], | |
+ 'cflags_cc+': [ | |
+ "<!@(<(llvm_config) --cxxflags)", | |
+ ], | |
+ 'cflags+': [ | |
+ ], | |
+# 'defines': [ # works | |
+# "__STDC_LIMIT_MACROS=1", | |
+# "__STDC_CONSTANT_MACROS=1", | |
+# ], | |
+ "ldflags+": [ | |
+ "<!@(<(llvm_config) --cxxflags --ldflags --libs core executionengine mcjit interpreter analysis ipo native --system-libs)", | |
+ ], | |
+# "libraries": [ | |
+# "<!@(<(llvm_config) --ldflags --system-libs)", | |
+# "<!@(<(llvm_config) --libs core mcjit native)" | |
+# ], | |
'sources': [ ### gcmole(all) ### | |
'../../include/v8-debug.h', | |
'../../include/v8-platform.h', | |
@@ -841,6 +860,17 @@ | |
'../../src/lithium.cc', | |
'../../src/lithium.h', | |
'../../src/lithium-inl.h', | |
+ '../../src/llvm/llvm-chunk.cc', | |
+ '../../src/llvm/llvm-chunk.h', | |
+ '../../src/llvm/llvm-headers.h', | |
+ '../../src/llvm/llvm-stackmaps.cc', | |
+ '../../src/llvm/llvm-stackmaps.h', | |
+ '../../src/llvm/mcjit-memory-manager.cc', | |
+ '../../src/llvm/mcjit-memory-manager.h', | |
+ '../../src/llvm/pass-normalize-phis.cc', | |
+ '../../src/llvm/pass-normalize-phis.h', | |
+ '../../src/llvm/pass-rewrite-safepoints.cc', | |
+ '../../src/llvm/pass-rewrite-safepoints.h', | |
'../../src/log-inl.h', | |
'../../src/log-utils.cc', | |
'../../src/log-utils.h', | |
@@ -848,6 +878,8 @@ | |
'../../src/log.h', | |
'../../src/lookup.cc', | |
'../../src/lookup.h', | |
+ '../../src/low-chunk.cc', | |
+ '../../src/low-chunk.h', | |
'../../src/macro-assembler.h', | |
'../../src/messages.cc', | |
'../../src/messages.h', | |
@@ -1449,6 +1481,7 @@ | |
}, | |
'include_dirs+': [ | |
'../..', | |
+ 'Hello2', | |
], | |
'sources': [ | |
'../../src/base/adapters.h', | |
@@ -1515,7 +1548,8 @@ | |
'link_settings': { | |
'libraries': [ | |
'-ldl', | |
- '-lrt' | |
+ '-lrt', | |
+ '<!@(<(llvm_config) --ldflags --libs core mcjit ipo native --system-libs)', #TODO(llvm): remove redundant if any | |
], | |
}, | |
}, { |
diff --git a/include/llvm/CodeGen/GCs.h b/include/llvm/CodeGen/GCs.h | |
index 5207f80..e70f5d6 100644 | |
--- a/include/llvm/CodeGen/GCs.h | |
+++ b/include/llvm/CodeGen/GCs.h | |
@@ -41,6 +41,9 @@ void linkErlangGCPrinter(); | |
void linkShadowStackGC(); | |
void linkStatepointExampleGC(); | |
+ | |
+/// Creates a GC representing the V8 JavaScript engine's garbage collector. | |
+void linkLLV8GC(); | |
} | |
#endif | |
diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h | |
index fee131e..f2ae4c5 100644 | |
--- a/include/llvm/CodeGen/LinkAllCodegenComponents.h | |
+++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h | |
@@ -41,6 +41,7 @@ namespace { | |
llvm::linkErlangGC(); | |
llvm::linkShadowStackGC(); | |
llvm::linkStatepointExampleGC(); | |
+ llvm::linkLLV8GC(); | |
(void) llvm::createBURRListDAGScheduler(nullptr, | |
llvm::CodeGenOpt::Default); | |
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h | |
index d1da6c3..3e80c02 100644 | |
--- a/include/llvm/CodeGen/MachineFrameInfo.h | |
+++ b/include/llvm/CodeGen/MachineFrameInfo.h | |
@@ -160,6 +160,7 @@ class MachineFrameInfo { | |
/// above. It then updates StackSize to contain the number of bytes that need | |
/// to be allocated on entry to the function. | |
uint64_t StackSize; | |
+ uint64_t NoParamsStackSize; | |
/// The amount that a frame offset needs to be adjusted to | |
/// have the actual offset from the stack/frame pointer. The exact usage of | |
@@ -255,6 +256,7 @@ public: | |
: StackAlignment(StackAlign), StackRealignable(isStackRealign), | |
RealignOption(RealignOpt) { | |
StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0; | |
+ NoParamsStackSize = 0; | |
HasVarSizedObjects = false; | |
FrameAddressTaken = false; | |
ReturnAddressTaken = false; | |
@@ -445,6 +447,11 @@ public: | |
/// Set the size of the stack. | |
void setStackSize(uint64_t Size) { StackSize = Size; } | |
+ /// Same as getStackSize(), except it does not count stack slots used | |
+ /// for parameter passing to callees, nor alignment padding. | |
+ uint64_t getNoParamsStackSize() const { return NoParamsStackSize; } | |
+ void setNoParamsStackSize(uint64_t Size) { NoParamsStackSize = Size; } | |
+ | |
/// Estimate and return the size of the stack frame. | |
unsigned estimateStackSize(const MachineFunction &MF) const; | |
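For context: these accessors exist because the stack map consumer (the V8 side) apparently wants the frame size without the outgoing-argument area; the PrologEpilogInserter and StackMaps hunks below compute and record that value. An illustrative C++ sketch of the intended relation between the two sizes (the helper is an assumption, not code from either repo):

#include "llvm/CodeGen/MachineFrameInfo.h"
#include <cassert>
#include <cstdint>

// Illustrative only: the full frame size covers locals, spills, outgoing
// call arguments and alignment padding; the "no params" size drops the
// argument area and padding, so it can never exceed the full size.
void checkFrameSizes(const llvm::MachineFrameInfo *MFI) {
  uint64_t Full = MFI->getStackSize();
  uint64_t NoParams = MFI->getNoParamsStackSize();
  assert(NoParams <= Full && "argument area can only grow the frame");
}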
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h | |
index f7a45af..0eebb91 100644 | |
--- a/include/llvm/IR/CallingConv.h | |
+++ b/include/llvm/IR/CallingConv.h | |
@@ -147,7 +147,62 @@ namespace CallingConv { | |
/// \brief MSVC calling convention that passes vectors and vector aggregates | |
/// in SSE registers. | |
- X86_VectorCall = 80 | |
+ X86_VectorCall = 80, | |
+ | |
+ // Calling convention the V8 JavaScript JIT engine uses internally. | |
+ X86_64_V8 = 81, | |
+ | |
+ // Calling convention for calling V8's CEntryStubs | |
+ // (which call runtime functions) | |
+ X86_64_V8_CES = 82, | |
+ | |
+ // The following are the calling conventions | |
+ // for different kinds of V8 code stubs. | |
+ X86_64_V8_S1 = 83, | |
+ | |
+ // CallingConv for V8 MathPow call | |
+ X86_64_V8_S2 = 84, | |
+ | |
+ // RecordWriteStub | |
+ X86_64_V8_RWS = 85, | |
+ | |
+ // CallNewArray | |
+ X86_64_V8_S3 = 86, | |
+ | |
+ // StringAdd | |
+ X86_64_V8_S4 = 87, | |
+ | |
+ X86_64_V8_S5 = 88, | |
+ | |
+ //DoCallFunction | |
+ X86_64_V8_S6 = 89, | |
+ | |
+ //DoStoreNamedGeneric | |
+ X86_64_V8_S7 = 90, | |
+ | |
+ //DoFunctionLiteral | |
+ X86_64_V8_S8 = 91, | |
+ | |
+ //LoadNamedGeneric | |
+ X86_64_V8_S9 = 92, | |
+ | |
+ //DoStringCompareAndBranch | |
+ X86_64_V8_S10 = 93, | |
+ | |
+ X86_64_V8_S11 = 94, | |
+ | |
+ //DoCallStub | |
+ X86_64_V8_Stub = 95, | |
+ | |
+ // Extended JS CALL | |
+ X86_64_V8_E = 96, | |
+ | |
+ //DoMaybeGrowElements | |
+ X86_64_V8_S12 = 97, | |
+ | |
+ //CallFunction | |
+ X86_64_V8_S13 = 98, | |
+ | |
}; | |
} // End CallingConv namespace | |
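Note: these are ordinary CallingConv::ID values, so the V8 side can tag definitions and call sites with them through the stock LLVM API. A minimal sketch, assuming a function and a call that should both use the base JS convention (not code from the patch):

#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

// Minimal sketch: caller and callee must agree on the convention, so it is
// set on both the function and the call instruction.
void tagWithV8CC(llvm::Function *F, llvm::CallInst *CI) {
  F->setCallingConv(llvm::CallingConv::X86_64_V8);
  CI->setCallingConv(llvm::CallingConv::X86_64_V8);
}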
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp | |
index b166c17..d299ffa 100644 | |
--- a/lib/AsmParser/LLLexer.cpp | |
+++ b/lib/AsmParser/LLLexer.cpp | |
@@ -584,6 +584,15 @@ lltok::Kind LLLexer::LexIdentifier() { | |
KEYWORD(x86_64_win64cc); | |
KEYWORD(webkit_jscc); | |
KEYWORD(anyregcc); | |
+ KEYWORD(x86_64_v8cc); | |
+ KEYWORD(x86_64_v8_cescc); | |
+ KEYWORD(x86_64_v8_rwscc); | |
+ KEYWORD(x86_64_v8_s1cc); | |
+ KEYWORD(x86_64_v8_s2cc); | |
+ KEYWORD(x86_64_v8_s3cc); | |
+ KEYWORD(x86_64_v8_s4cc); | |
+ KEYWORD(x86_64_v8_s5cc); | |
+ KEYWORD(x86_64_v8_s6cc); | |
KEYWORD(preserve_mostcc); | |
KEYWORD(preserve_allcc); | |
KEYWORD(ghccc); | |
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp | |
index b2f7af6..caa18e5 100644 | |
--- a/lib/AsmParser/LLParser.cpp | |
+++ b/lib/AsmParser/LLParser.cpp | |
@@ -1527,6 +1527,10 @@ bool LLParser::ParseOptionalDLLStorageClass(unsigned &Res) { | |
/// ::= 'x86_64_win64cc' | |
/// ::= 'webkit_jscc' | |
/// ::= 'anyregcc' | |
+/// ::= 'x86_64_v8cc' | |
+/// ::= 'x86_64_v8_cescc' | |
+/// ::= 'x86_64_v8_rwscc' | |
+/// ::= 'x86_64_v8_s1cc' | |
/// ::= 'preserve_mostcc' | |
/// ::= 'preserve_allcc' | |
/// ::= 'ghccc' | |
@@ -1555,6 +1559,15 @@ bool LLParser::ParseOptionalCallingConv(unsigned &CC) { | |
case lltok::kw_x86_64_win64cc: CC = CallingConv::X86_64_Win64; break; | |
case lltok::kw_webkit_jscc: CC = CallingConv::WebKit_JS; break; | |
case lltok::kw_anyregcc: CC = CallingConv::AnyReg; break; | |
+ case lltok::kw_x86_64_v8cc: CC = CallingConv::X86_64_V8; break; | |
+ case lltok::kw_x86_64_v8_cescc:CC = CallingConv::X86_64_V8_CES; break; | |
+ case lltok::kw_x86_64_v8_rwscc:CC = CallingConv::X86_64_V8_RWS; break; | |
+ case lltok::kw_x86_64_v8_s1cc: CC = CallingConv::X86_64_V8_S1; break; | |
+ case lltok::kw_x86_64_v8_s2cc: CC = CallingConv::X86_64_V8_S2; break; | |
+ case lltok::kw_x86_64_v8_s3cc: CC = CallingConv::X86_64_V8_S3; break; | |
+ case lltok::kw_x86_64_v8_s4cc: CC = CallingConv::X86_64_V8_S4; break; | |
+ case lltok::kw_x86_64_v8_s5cc: CC = CallingConv::X86_64_V8_S5; break; | |
+ case lltok::kw_x86_64_v8_s6cc: CC = CallingConv::X86_64_V8_S6; break; | |
case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break; | |
case lltok::kw_preserve_allcc: CC = CallingConv::PreserveAll; break; | |
case lltok::kw_ghccc: CC = CallingConv::GHC; break; | |
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h | |
index b83ca2c..91761df 100644 | |
--- a/lib/AsmParser/LLToken.h | |
+++ b/lib/AsmParser/LLToken.h | |
@@ -95,6 +95,15 @@ namespace lltok { | |
kw_spir_kernel, kw_spir_func, | |
kw_x86_64_sysvcc, kw_x86_64_win64cc, | |
kw_webkit_jscc, kw_anyregcc, | |
+ kw_x86_64_v8cc, | |
+ kw_x86_64_v8_cescc, | |
+ kw_x86_64_v8_rwscc, | |
+ kw_x86_64_v8_s1cc, | |
+ kw_x86_64_v8_s2cc, | |
+ kw_x86_64_v8_s3cc, | |
+ kw_x86_64_v8_s4cc, | |
+ kw_x86_64_v8_s5cc, | |
+ kw_x86_64_v8_s6cc, | |
kw_preserve_mostcc, kw_preserve_allcc, | |
kw_ghccc, | |
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | |
index fdba2a9..9f2cf3a 100644 | |
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | |
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | |
@@ -598,7 +598,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { | |
/// function. | |
void AsmPrinter::EmitFunctionHeader() { | |
// Print out constants referenced by the function | |
- EmitConstantPool(); | |
+// EmitConstantPool(); | |
// Print the 'header' of function. | |
const Function *F = MF->getFunction(); | |
@@ -999,6 +999,7 @@ void AsmPrinter::EmitFunctionBody() { | |
MMI->EndFunction(); | |
OutStreamer->AddBlankLine(); | |
+ EmitConstantPool(); | |
} | |
/// \brief Compute the number of Global Variables that uses a Constant. | |
@@ -1303,8 +1304,13 @@ void AsmPrinter::EmitConstantPool() { | |
if (!CPE.isMachineConstantPoolEntry()) | |
C = CPE.Val.ConstVal; | |
- MCSection *S = | |
- getObjFileLowering().getSectionForConstant(getDataLayout(), Kind, C); | |
+ MCSection *S = nullptr; | |
+ if (MF->getFunction()->getAttributes().hasAttribute( | |
+ AttributeSet::FunctionIndex, "put-constantpool-in-fn-section")) { | |
+ S = getObjFileLowering().getTextSection(); | |
+ } else { | |
+ S = getObjFileLowering().getSectionForConstant(getDataLayout(), Kind, C); | |
+ } | |
// The number of sections are small, just do a linear search from the | |
// last section to the first. | |
diff --git a/lib/CodeGen/LLV8GC.cpp b/lib/CodeGen/LLV8GC.cpp | |
new file mode 100644 | |
index 0000000..524fa69 | |
--- /dev/null | |
+++ b/lib/CodeGen/LLV8GC.cpp | |
@@ -0,0 +1,37 @@ | |
+/* | |
+ * llvm-gc-strategy.cc | |
+ * | |
+ * Created on: Sep 14, 2015 | |
+ * Author: vlad | |
+ */ | |
+ | |
+#include "llvm/CodeGen/GCs.h" | |
+#include "llvm/CodeGen/GCStrategy.h" | |
+//#include "llvm/IR/DerivedTypes.h" | |
+//#include "llvm/IR/Value.h" | |
+ | |
+namespace { | |
+ | |
+class LLV8GC : public llvm::GCStrategy { | |
+public: | |
+ LLV8GC() { | |
+ UseStatepoints = true; | |
+ // These options are all gc.root specific, we specify them so that the | |
+ // gc.root lowering code doesn't run. | |
+ InitRoots = false; | |
+ NeededSafePoints = 0; | |
+ UsesMetadata = false; | |
+ CustomRoots = false; | |
+ } | |
+ llvm::Optional<bool> isGCManagedPointer(const llvm::Value *V) const override { | |
+ return true; | |
+ } | |
+}; | |
+ | |
+} // namespace | |
+ | |
+static llvm::GCRegistry::Add<LLV8GC> X("v8-gc", "Dummy GC strategy for V8"); | |
+ | |
+namespace llvm { | |
+void linkLLV8GC() {} | |
+} | |
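For context: the strategy registers under the name "v8-gc", so compiled functions opt in by GC name, and linkLLV8GC() exists only to pin this object file into the final binary. A sketch of the opt-in, assuming the usual Function::setGC path (not code from the patch):

#include "llvm/IR/Function.h"

// Sketch: marking a function as managed by the strategy registered above.
// The string must match the GCRegistry name, "v8-gc".
void markV8Managed(llvm::Function &F) {
  F.setGC("v8-gc");  // picked up by the safepoint passes patched further down
}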
diff --git a/lib/CodeGen/MachineRegisterInfo.cpp b/lib/CodeGen/MachineRegisterInfo.cpp | |
index 574eefe..3364ebd 100644 | |
--- a/lib/CodeGen/MachineRegisterInfo.cpp | |
+++ b/lib/CodeGen/MachineRegisterInfo.cpp | |
@@ -477,6 +477,10 @@ static bool isNoReturnDef(const MachineOperand &MO) { | |
} | |
bool MachineRegisterInfo::isPhysRegModified(unsigned PhysReg) const { | |
+ // FIXME: it's a hack to always push rsi and rdi. | |
+ if (MF->getFunction()->getCallingConv() == CallingConv::X86_64_V8) | |
+ if (PhysReg == 39 || PhysReg == 43) | |
+ return true; | |
if (UsedPhysRegMask.test(PhysReg)) | |
return true; | |
const TargetRegisterInfo *TRI = getTargetRegisterInfo(); | |
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp | |
index f169f48..a6b7272 100644 | |
--- a/lib/CodeGen/PrologEpilogInserter.cpp | |
+++ b/lib/CodeGen/PrologEpilogInserter.cpp | |
@@ -736,6 +736,7 @@ void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) { | |
AdjustStackOffset(MFI, *I, StackGrowsDown, Offset, MaxAlign); | |
} | |
+ MFI->setNoParamsStackSize(Offset - LocalAreaOffset); | |
if (!TFI.targetHandlesStackFrameRounding()) { | |
// If we have reserved argument space for call sites in the function | |
// immediately on entry to the current function, count it as part of the | |
@@ -924,8 +925,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn, | |
unsigned Reg; | |
MachineOperand &Offset = MI->getOperand(i + 1); | |
const unsigned refOffset = | |
- TFI->getFrameIndexReferenceFromSP(Fn, MI->getOperand(i).getIndex(), | |
- Reg); | |
+ TFI->getFrameIndexReference(Fn, MI->getOperand(i).getIndex(), Reg); | |
Offset.setImm(Offset.getImm() + refOffset); | |
MI->getOperand(i).ChangeToRegister(Reg, false /*isDef*/); | |
diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp | |
index b3cd8b3..5026066 100644 | |
--- a/lib/CodeGen/StackMaps.cpp | |
+++ b/lib/CodeGen/StackMaps.cpp | |
@@ -338,7 +338,7 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint64_t ID, | |
bool HasDynamicFrameSize = | |
MFI->hasVarSizedObjects() || RegInfo->needsStackRealignment(*(AP.MF)); | |
FnStackSize[AP.CurrentFnSym] = | |
- HasDynamicFrameSize ? UINT64_MAX : MFI->getStackSize(); | |
+ HasDynamicFrameSize ? UINT64_MAX : MFI->getNoParamsStackSize(); | |
} | |
void StackMaps::recordStackMap(const MachineInstr &MI) { | |
diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | |
index 0f46571..74431d2 100644 | |
--- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | |
+++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | |
@@ -345,7 +345,8 @@ bool TargetLoweringObjectFileELF::shouldPutJumpTableInFunctionSection( | |
bool UsesLabelDifference, const Function &F) const { | |
// We can always create relative relocations, so use another section | |
// that can be marked non-executable. | |
- return false; | |
+ return F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, | |
+ "put-jumptable-in-fn-section"); | |
} | |
/// Given a mergeable constant with the specified size and relocation | |
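Note: this hunk and the AsmPrinter one above both key off plain string function attributes, so placement is controlled per function by the client. A hedged sketch of how the V8 side would request both behaviors (the helper is an assumption; the attribute strings are exactly the ones the hunks test):

#include "llvm/IR/Function.h"

// Sketch: keeping constant pools and jump tables inside the function's own
// text section, so a compiled function stays one self-contained blob.
void keepTablesInFunctionSection(llvm::Function &F) {
  F.addFnAttr("put-constantpool-in-fn-section");
  F.addFnAttr("put-jumptable-in-fn-section");
}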
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp | |
index 3aeabf8..1d08bc4 100644 | |
--- a/lib/IR/AsmWriter.cpp | |
+++ b/lib/IR/AsmWriter.cpp | |
@@ -300,6 +300,15 @@ static void PrintCallingConv(unsigned cc, raw_ostream &Out) { | |
case CallingConv::Fast: Out << "fastcc"; break; | |
case CallingConv::Cold: Out << "coldcc"; break; | |
case CallingConv::WebKit_JS: Out << "webkit_jscc"; break; | |
+ case CallingConv::X86_64_V8: Out << "x86_64_v8cc"; break; | |
+ case CallingConv::X86_64_V8_CES: Out << "x86_64_v8_cescc"; break; | |
+ case CallingConv::X86_64_V8_RWS: Out << "x86_64_v8_rwscc"; break; | |
+ case CallingConv::X86_64_V8_S1: Out << "x86_64_v8_s1cc"; break; | |
+ case CallingConv::X86_64_V8_S2: Out << "x86_64_v8_s2cc"; break; | |
+ case CallingConv::X86_64_V8_S3: Out << "x86_64_v8_s3cc"; break; | |
+ case CallingConv::X86_64_V8_S4: Out << "x86_64_v8_s4cc"; break; | |
+ case CallingConv::X86_64_V8_S5: Out << "x86_64_v8_s5cc"; break; | |
+ case CallingConv::X86_64_V8_S6: Out << "x86_64_v8_s6cc"; break; | |
case CallingConv::AnyReg: Out << "anyregcc"; break; | |
case CallingConv::PreserveMost: Out << "preserve_mostcc"; break; | |
case CallingConv::PreserveAll: Out << "preserve_allcc"; break; | |
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td | |
index 8f88888..e8da267 100644 | |
--- a/lib/Target/X86/X86CallingConv.td | |
+++ b/lib/Target/X86/X86CallingConv.td | |
@@ -191,6 +191,95 @@ def RetCC_X86_64_WebKit_JS : CallingConv<[ | |
CCIfType<[i64], CCAssignToReg<[RAX]>> | |
]>; | |
+// X86-64 V8 return-value convention. | |
+// It mirrors the WebKit convention, but we keep a separate copy | |
+// in case the WebKit convention ever changes. | |
+def RetCC_X86_64_V8 : CallingConv<[ | |
+ // Promote all types to i64 | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_E : CallingConv<[ | |
+ // Promote all types to i64 | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+ | |
+def RetCC_X86_64_V8_CES : CallingConv<[ | |
+ // Return: RAX // TODO: sometimes it's 2 values | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_RWS : CallingConv<[ | |
+ // FIXME: it's void | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S2 : CallingConv<[ | |
+ CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToReg<[XMM3]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S3 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S4 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S5 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S6 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S7 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S8 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S9 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S10 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S11 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_Stub : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
+def RetCC_X86_64_V8_S12 : CallingConv<[ | |
+ // Return: RAX | |
+ CCIfType<[i64], CCAssignToReg<[RAX]>> | |
+]>; | |
+ | |
// X86-64 AnyReg return-value convention. No explicit register is specified for | |
// the return-value. The register allocator is allowed and expected to choose | |
// any free register. | |
@@ -202,6 +291,135 @@ def RetCC_X86_64_AnyReg : CallingConv<[ | |
CCCustom<"CC_X86_AnyReg_Error"> | |
]>; | |
+def CC_X86_64_V8 : CallingConv<[ | |
+ // Promote i8/i16/i32 arguments to i64. | |
+ // Do we have such arguments? IDK | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ | |
+ // First three arguments are context, callee's JSFunction object, and OSR predicate | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDI, RBX]>>, | |
+ | |
+ // The remaining integer arguments are passed on the stack. | |
+ // 64bit integer and floating-point arguments are default-aligned and stored | |
+ // in 8 byte stack slots. | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_E : CallingConv<[ | |
+ // Promote i8/i16/i32 arguments to i64. | |
+ // Do we have such arguments? IDK | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ | |
+ // First three arguments are context, callee's JSFunction object, and OSR predicate | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDI, RBX, RAX]>>, | |
+ | |
+ // The remaining integer arguments are passed on the stack. | |
+ // 64bit integer and floating-point arguments are default-aligned and stored | |
+ // in 8 byte stack slots. | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+ | |
+def CC_X86_64_V8_CES : CallingConv<[ | |
+ // The first 3 args are: nargs for the runtime function, | |
+ // address of the runtime function and the current JS context | |
+ CCIfType<[i64], CCAssignToReg<[RAX, RBX, RSI]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_RWS : CallingConv<[ | |
+ // The first (and only) 3 args are: object, map (new value), | |
+ // dst (object's field address) | |
+ CCIfType<[i64], CCAssignToReg<[RBX, RCX, RDX]>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S1 : CallingConv<[ | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDI, RBX, RCX, RDX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S3 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDI, RAX, RBX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S4 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDX, RBX, RAX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S7 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDX, RAX, RCX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S8 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RBX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S2 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], | |
+ CCAssignToReg<[XMM2]>>, | |
+ CCIfType<[i64], CCAssignToReg<[RDX]>>, | |
+ CCIfType<[f64, f64], CCAssignToReg<[XMM2, XMM1]>>, | |
+ CCIfType<[f64, i64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S5 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDX, RCX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S6 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDI, RBX, RDX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S9 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDX, RCX, RBX, RAX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S10 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDX, RAX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S11 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RAX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_S12 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RAX, RBX]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+def CC_X86_64_V8_Stub : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+def CC_X86_64_V8_S13 : CallingConv<[ | |
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>, | |
+ CCIfType<[i64], CCAssignToReg<[RSI, RDI]>>, | |
+ CCIfType<[i64, f64], CCAssignToStack<8, 0>> | |
+]>; | |
+ | |
+ | |
+ | |
// This is the root return-value convention for the X86-32 backend. | |
def RetCC_X86_32 : CallingConv<[ | |
// If FastCC, use RetCC_X86_32_Fast. | |
@@ -219,6 +437,25 @@ def RetCC_X86_64 : CallingConv<[ | |
// HiPE uses RetCC_X86_64_HiPE | |
CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>, | |
+ // V8 return | |
+ CCIfCC<"CallingConv::X86_64_V8", CCDelegateTo<RetCC_X86_64_V8>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_E", CCDelegateTo<RetCC_X86_64_V8_E>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_CES", CCDelegateTo<RetCC_X86_64_V8_CES>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S1", CCDelegateTo<RetCC_X86_64_V8_CES>>, // sic | |
+ CCIfCC<"CallingConv::X86_64_V8_RWS", CCDelegateTo<RetCC_X86_64_V8_RWS>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S2", CCDelegateTo<RetCC_X86_64_V8_S2>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S3", CCDelegateTo<RetCC_X86_64_V8_S3>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S4", CCDelegateTo<RetCC_X86_64_V8_S4>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S5", CCDelegateTo<RetCC_X86_64_V8_S5>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S6", CCDelegateTo<RetCC_X86_64_V8_S6>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S7", CCDelegateTo<RetCC_X86_64_V8_S7>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S8", CCDelegateTo<RetCC_X86_64_V8_S8>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S9", CCDelegateTo<RetCC_X86_64_V8_S9>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S10", CCDelegateTo<RetCC_X86_64_V8_S10>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S11", CCDelegateTo<RetCC_X86_64_V8_S11>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S12", CCDelegateTo<RetCC_X86_64_V8_S12>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_Stub", CCDelegateTo<RetCC_X86_64_V8_Stub>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S13", CCDelegateTo<RetCC_X86_64_V8_Stub>>, | |
// Handle JavaScript calls. | |
CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>, | |
CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>, | |
@@ -727,6 +964,24 @@ def CC_X86_32 : CallingConv<[ | |
// This is the root argument convention for the X86-64 backend. | |
def CC_X86_64 : CallingConv<[ | |
+ CCIfCC<"CallingConv::X86_64_V8_S1", CCDelegateTo<CC_X86_64_V8_S1>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_CES", CCDelegateTo<CC_X86_64_V8_CES>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_RWS", CCDelegateTo<CC_X86_64_V8_RWS>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S2", CCDelegateTo<CC_X86_64_V8_S2>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S3", CCDelegateTo<CC_X86_64_V8_S3>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S4", CCDelegateTo<CC_X86_64_V8_S4>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S5", CCDelegateTo<CC_X86_64_V8_S5>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S6", CCDelegateTo<CC_X86_64_V8_S6>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S7", CCDelegateTo<CC_X86_64_V8_S7>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S8", CCDelegateTo<CC_X86_64_V8_S8>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S9", CCDelegateTo<CC_X86_64_V8_S9>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S10", CCDelegateTo<CC_X86_64_V8_S10>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S11", CCDelegateTo<CC_X86_64_V8_S11>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S12", CCDelegateTo<CC_X86_64_V8_S12>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_Stub", CCDelegateTo<CC_X86_64_V8_Stub>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_S13", CCDelegateTo<CC_X86_64_V8_S13>>, | |
+ CCIfCC<"CallingConv::X86_64_V8_E", CCDelegateTo<CC_X86_64_V8_E>>, | |
+ CCIfCC<"CallingConv::X86_64_V8", CCDelegateTo<CC_X86_64_V8>>, | |
CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>, | |
CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>, | |
CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>, | |
@@ -784,6 +1039,32 @@ def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX, RSP, | |
(sequence "YMM%u", 0, 31)), | |
(sequence "XMM%u", 0, 15))>; | |
+// RAX, RBX, RCX, RDX are caller-saved | |
+// RSI and RDI should always be pushed onto the stack | |
+def CSR_X86_64_V8 : CalleeSavedRegs<(add RDI, RSI)>; | |
+def CSR_X86_64_V8_E : CalleeSavedRegs<(add RDI, RSI)>; | |
+ | |
+// V8 CEntryStubs might clobber anything | |
+def CSR_X86_64_V8_CES : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S1 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S2 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S3 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S4 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S5 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S6 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S7 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S8 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S9 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S10 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S11 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S12 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_S13 : CalleeSavedRegs<(add RBP)>; | |
+def CSR_X86_64_V8_Stub : CalleeSavedRegs<(add RBP)>; | |
+// RCX is not saved, the rest seems to be. | |
+def CSR_X86_64_V8_RWS : CalleeSavedRegs<(add RAX, RDX, RBX, RBP, RSI, RDI, | |
+ R8, R9, R10, R11, R12, R13, R14, | |
+ R15)>; | |
+ | |
// Standard C + YMM6-15 | |
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, | |
R13, R14, R15, | |
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp | |
index df8de82..5350a83 100644 | |
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp | |
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp | |
@@ -798,6 +798,22 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) { | |
return false; | |
} | |
+ // This block enables rip-relative offset encoding for jump tables | |
+ // for the Large code model. | |
+ if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP && | |
+ M == CodeModel::Large) { | |
+ // Base and index reg must be 0 in order to use %rip as base. | |
+ if (AM.hasBaseOrIndexReg()) | |
+ return true; | |
+ if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) { | |
+ AM.JT = J->getIndex(); | |
+ AM.SymbolFlags = J->getTargetFlags(); | |
+ if (N.getOpcode() == X86ISD::WrapperRIP) | |
+ AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64)); | |
+ return false; | |
+ } | |
+ } | |
+ | |
// Handle the case when globals fit in our immediate field: This is true for | |
// X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit | |
// mode, this only applies to a non-RIP-relative computation. | |
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp | |
index ee7c445..7a5d1a5 100644 | |
--- a/lib/Target/X86/X86ISelLowering.cpp | |
+++ b/lib/Target/X86/X86ISelLowering.cpp | |
@@ -3889,6 +3889,25 @@ bool X86::isCalleePop(CallingConv::ID CallingConv, | |
switch (CallingConv) { | |
default: | |
return false; | |
+ case CallingConv::X86_64_V8: | |
+ case CallingConv::X86_64_V8_E: | |
+ case CallingConv::X86_64_V8_S1: | |
+ case CallingConv::X86_64_V8_S2: | |
+ case CallingConv::X86_64_V8_S3: | |
+ case CallingConv::X86_64_V8_S4: | |
+ case CallingConv::X86_64_V8_S5: | |
+ case CallingConv::X86_64_V8_S6: | |
+ case CallingConv::X86_64_V8_S7: | |
+ case CallingConv::X86_64_V8_S8: | |
+ case CallingConv::X86_64_V8_S9: | |
+ case CallingConv::X86_64_V8_S10: | |
+ case CallingConv::X86_64_V8_S11: | |
+ case CallingConv::X86_64_V8_S12: | |
+ case CallingConv::X86_64_V8_CES: | |
+ case CallingConv::X86_64_V8_RWS: | |
+ case CallingConv::X86_64_V8_Stub: | |
+ case CallingConv::X86_64_V8_S13: | |
+ return true; | |
case CallingConv::X86_StdCall: | |
case CallingConv::X86_FastCall: | |
case CallingConv::X86_ThisCall: | |
@@ -11577,7 +11596,8 @@ SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { | |
CodeModel::Model M = DAG.getTarget().getCodeModel(); | |
if (Subtarget->isPICStyleRIPRel() && | |
- (M == CodeModel::Small || M == CodeModel::Kernel)) | |
+ (M == CodeModel::Small || M == CodeModel::Kernel || | |
+ M == CodeModel::Large)) | |
WrapperKind = X86ISD::WrapperRIP; | |
else if (Subtarget->isPICStyleGOT()) | |
OpFlag = X86II::MO_GOTOFF; | |
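For context: both jump-table changes only fire under CodeModel::Large, the model an MCJIT client picks when it cannot guarantee its code lands in the low 2 GB. A sketch of the engine setup this implies (an assumption, not taken from the llv8 sources):

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/IR/Module.h"
#include <memory>

// Sketch: constructing an MCJIT instance with the Large code model, which
// makes the WrapperRIP jump-table path above reachable.
llvm::ExecutionEngine *createLargeModelJIT(std::unique_ptr<llvm::Module> M) {
  return llvm::EngineBuilder(std::move(M))
      .setCodeModel(llvm::CodeModel::Large)
      .create();
}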
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp | |
index 88a6d00..2de7b66 100644 | |
--- a/lib/Target/X86/X86MCInstLower.cpp | |
+++ b/lib/Target/X86/X86MCInstLower.cpp | |
@@ -831,6 +831,10 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI, | |
X86MCInstLower &MCIL) { | |
assert(Subtarget->is64Bit() && "Statepoint currently only supports X86-64"); | |
+ // Record our statepoint node in the same section used by STACKMAP | |
+ // and PATCHPOINT | |
+ SM.recordStatepoint(MI); | |
+ | |
StatepointOpers SOpers(&MI); | |
if (unsigned PatchBytes = SOpers.getNumPatchBytes()) { | |
EmitNops(*OutStreamer, PatchBytes, Subtarget->is64Bit(), | |
@@ -874,10 +878,6 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI, | |
CallInst.addOperand(CallTargetMCOp); | |
OutStreamer->EmitInstruction(CallInst, getSubtargetInfo()); | |
} | |
- | |
- // Record our statepoint node in the same section used by STACKMAP | |
- // and PATCHPOINT | |
- SM.recordStatepoint(MI); | |
} | |
void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI, | |
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp | |
index 7b04e81..66fcabb 100644 | |
--- a/lib/Target/X86/X86RegisterInfo.cpp | |
+++ b/lib/Target/X86/X86RegisterInfo.cpp | |
@@ -230,6 +230,42 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { | |
assert(MF && "MachineFunction required"); | |
switch (MF->getFunction()->getCallingConv()) { | |
+ case CallingConv::X86_64_V8_S2: | |
+ return CSR_X86_64_V8_S2_SaveList; | |
+ case CallingConv::X86_64_V8_S3: | |
+ return CSR_X86_64_V8_S3_SaveList; | |
+ case CallingConv::X86_64_V8_S1: | |
+ return CSR_X86_64_V8_S1_SaveList; | |
+ case CallingConv::X86_64_V8_S4: | |
+ return CSR_X86_64_V8_S4_SaveList; | |
+ case CallingConv::X86_64_V8_S5: | |
+ return CSR_X86_64_V8_S5_SaveList; | |
+ case CallingConv::X86_64_V8_S6: | |
+ return CSR_X86_64_V8_S6_SaveList; | |
+ case CallingConv::X86_64_V8_S7: | |
+ return CSR_X86_64_V8_S7_SaveList; | |
+ case CallingConv::X86_64_V8_S8: | |
+ return CSR_X86_64_V8_S8_SaveList; | |
+ case CallingConv::X86_64_V8_S9: | |
+ return CSR_X86_64_V8_S9_SaveList; | |
+ case CallingConv::X86_64_V8_S10: | |
+ return CSR_X86_64_V8_S10_SaveList; | |
+ case CallingConv::X86_64_V8_S11: | |
+ return CSR_X86_64_V8_S11_SaveList; | |
+ case CallingConv::X86_64_V8_S12: | |
+ return CSR_X86_64_V8_S12_SaveList; | |
+ case CallingConv::X86_64_V8_Stub: | |
+ return CSR_X86_64_V8_Stub_SaveList; | |
+ case CallingConv::X86_64_V8_S13: | |
+ return CSR_X86_64_V8_S13_SaveList; | |
+ case CallingConv::X86_64_V8_CES: | |
+ return CSR_X86_64_V8_CES_SaveList; | |
+ case CallingConv::X86_64_V8_RWS: | |
+ return CSR_X86_64_V8_RWS_SaveList; | |
+ case CallingConv::X86_64_V8: | |
+ return CSR_X86_64_V8_SaveList; | |
+ case CallingConv::X86_64_V8_E: | |
+ return CSR_X86_64_V8_E_SaveList; | |
case CallingConv::GHC: | |
case CallingConv::HiPE: | |
return CSR_NoRegs_SaveList; | |
@@ -290,6 +326,42 @@ X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF, | |
bool HasAVX512 = Subtarget.hasAVX512(); | |
switch (CC) { | |
+ case CallingConv::X86_64_V8_CES: | |
+ return CSR_X86_64_V8_CES_RegMask; | |
+ case CallingConv::X86_64_V8_RWS: | |
+ return CSR_X86_64_V8_RWS_RegMask; | |
+ case CallingConv::X86_64_V8_S1: | |
+ return CSR_X86_64_V8_S1_RegMask; | |
+ case CallingConv::X86_64_V8_S3: | |
+ return CSR_X86_64_V8_S3_RegMask; | |
+ case CallingConv::X86_64_V8_S4: | |
+ return CSR_X86_64_V8_S4_RegMask; | |
+ case CallingConv::X86_64_V8_S5: | |
+ return CSR_X86_64_V8_S5_RegMask; | |
+ case CallingConv::X86_64_V8_S6: | |
+ return CSR_X86_64_V8_S6_RegMask; | |
+ case CallingConv::X86_64_V8_S7: | |
+ return CSR_X86_64_V8_S7_RegMask; | |
+ case CallingConv::X86_64_V8_S8: | |
+ return CSR_X86_64_V8_S8_RegMask; | |
+ case CallingConv::X86_64_V8_S9: | |
+ return CSR_X86_64_V8_S9_RegMask; | |
+ case CallingConv::X86_64_V8_S10: | |
+ return CSR_X86_64_V8_S10_RegMask; | |
+ case CallingConv::X86_64_V8_S11: | |
+ return CSR_X86_64_V8_S11_RegMask; | |
+ case CallingConv::X86_64_V8_S12: | |
+ return CSR_X86_64_V8_S12_RegMask; | |
+ case CallingConv::X86_64_V8_S13: | |
+ return CSR_X86_64_V8_S13_RegMask; | |
+ case CallingConv::X86_64_V8_Stub: | |
+ return CSR_X86_64_V8_Stub_RegMask; | |
+ case CallingConv::X86_64_V8_S2: | |
+ return CSR_X86_64_V8_S2_RegMask; | |
+ case CallingConv::X86_64_V8_E: | |
+ return CSR_X86_64_V8_E_RegMask; | |
+ case CallingConv::X86_64_V8: | |
+ return CSR_X86_64_V8_RegMask; | |
case CallingConv::GHC: | |
case CallingConv::HiPE: | |
return CSR_NoRegs_RegMask; | |
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td | |
index cdb151c..b08aa22 100644 | |
--- a/lib/Target/X86/X86RegisterInfo.td | |
+++ b/lib/Target/X86/X86RegisterInfo.td | |
@@ -326,8 +326,8 @@ def BND3 : X86Reg<"bnd3", 3>; | |
// require a REX prefix. For example, "addb %ah, %dil" and "movzbl %ah, %r8d" | |
// cannot be encoded. | |
def GR8 : RegisterClass<"X86", [i8], 8, | |
- (add AL, CL, DL, AH, CH, DH, BL, BH, SIL, DIL, BPL, SPL, | |
- R8B, R9B, R10B, R11B, R14B, R15B, R12B, R13B)> { | |
+ (add AL, CL, DL, BL, AH, CH, DH, BH, DIL, SIL, BPL, SPL, | |
+ R8B, R9B, R10B, R11B, R14B, R15B, R12B)> { | |
let AltOrders = [(sub GR8, AH, BH, CH, DH)]; | |
let AltOrderSelect = [{ | |
return MF.getSubtarget<X86Subtarget>().is64Bit(); | |
@@ -335,19 +335,19 @@ def GR8 : RegisterClass<"X86", [i8], 8, | |
} | |
def GR16 : RegisterClass<"X86", [i16], 16, | |
- (add AX, CX, DX, SI, DI, BX, BP, SP, | |
- R8W, R9W, R10W, R11W, R14W, R15W, R12W, R13W)>; | |
+ (add AX, CX, DX, BX, DI, SI, BP, SP, | |
+ R8W, R9W, R10W, R11W, R14W, R15W, R12W)>; | |
def GR32 : RegisterClass<"X86", [i32], 32, | |
- (add EAX, ECX, EDX, ESI, EDI, EBX, EBP, ESP, | |
- R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D)>; | |
+ (add EAX, ECX, EDX, EBX, EDI, ESI, EBP, ESP, | |
+ R8D, R9D, R10D, R11D, R14D, R15D, R12D)>; | |
// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since | |
// RIP isn't really a register and it can't be used anywhere except in an | |
// address, but it doesn't cause trouble. | |
def GR64 : RegisterClass<"X86", [i64], 64, | |
- (add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11, | |
- RBX, R14, R15, R12, R13, RBP, RSP, RIP)>; | |
+ (add RAX, RCX, RDX, RBX, RDI, RSI, | |
+ R8, R9, R10, R11, R14, R15, R12, RBP, RSP, RIP)>; | |
// Segment registers for use by MOV instructions (and others) that have a | |
// segment register as one operand. Always contain a 16-bit segment | |
@@ -491,4 +491,4 @@ def VK32WM : RegisterClass<"X86", [v32i1], 32, (add VK16WM)> {let Size = 32;} | |
def VK64WM : RegisterClass<"X86", [v64i1], 64, (add VK32WM)> {let Size = 64;} | |
// Bound registers | |
-def BNDR : RegisterClass<"X86", [v2i64], 128, (sequence "BND%u", 0, 3)>; | |
\ No newline at end of file | |
+def BNDR : RegisterClass<"X86", [v2i64], 128, (sequence "BND%u", 0, 3)>; | |
diff --git a/lib/Transforms/Scalar/PlaceSafepoints.cpp b/lib/Transforms/Scalar/PlaceSafepoints.cpp | |
index 1473e3d..6bc1f2e 100644 | |
--- a/lib/Transforms/Scalar/PlaceSafepoints.cpp | |
+++ b/lib/Transforms/Scalar/PlaceSafepoints.cpp | |
@@ -205,6 +205,15 @@ static bool needsStatepoint(const CallSite &CS) { | |
if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) { | |
return false; | |
} | |
+ | |
+ AttributeSet Attrs = CS.getAttributes(); | |
+ Attribute NoStatepoint = | |
+ Attrs.getAttribute(AttributeSet::FunctionIndex, "no-statepoint-please"); | |
+ if (NoStatepoint.isStringAttribute()) { | |
+ // Client explicitly asked not to insert statepoint. | |
+ return false; | |
+ } | |
+ | |
return true; | |
} | |
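Note: a call therefore opts out of statepoint insertion via a string attribute on the call site itself. One plausible way to set it, assuming this branch's AttrBuilder/AttributeSet API (not code from the patch):

#include "llvm/IR/Attributes.h"
#include "llvm/IR/Instructions.h"

// Sketch: exempting a freshly created call. setAttributes replaces the
// call's whole attribute set, which is acceptable for a brand-new call.
void exemptFromStatepoints(llvm::CallInst *CI) {
  llvm::AttrBuilder B;
  B.addAttribute("no-statepoint-please");
  CI->setAttributes(llvm::AttributeSet::get(
      CI->getContext(), llvm::AttributeSet::FunctionIndex, B));
}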
@@ -504,17 +513,22 @@ static bool shouldRewriteFunction(Function &F) { | |
const char *FunctionGCName = F.getGC(); | |
const StringRef StatepointExampleName("statepoint-example"); | |
const StringRef CoreCLRName("coreclr"); | |
+ const StringRef V8GCName("v8-gc"); | |
return (StatepointExampleName == FunctionGCName) || | |
- (CoreCLRName == FunctionGCName); | |
+ (CoreCLRName == FunctionGCName) || | |
+ (V8GCName == FunctionGCName); | |
} else | |
return false; | |
} | |
// TODO: These should become properties of the GCStrategy, possibly with | |
// command line overrides. | |
-static bool enableEntrySafepoints(Function &F) { return !NoEntry; } | |
-static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; } | |
-static bool enableCallSafepoints(Function &F) { return !NoCall; } | |
+static bool enableEntrySafepoints(Function &F) { return false; } | |
+  //!NoEntry; } | |
+static bool enableBackedgeSafepoints(Function &F) { return false; } | |
+  //!NoBackedge; } | |
+static bool enableCallSafepoints(Function &F) { return true; } | |
+//!NoCall; } | |
// Normalize basic block to make it ready to be target of invoke statepoint. | |
// Ensure that 'BB' does not have phi nodes. It may require spliting it. | |
@@ -719,6 +733,9 @@ bool PlaceSafepoints::runOnFunction(Function &F) { | |
// Can not RAUW for the invoke gc result in case of phi nodes preset. | |
assert(CS.isCall() || !isa<PHINode>(cast<Instruction>(GCResult)->getParent()->begin())); | |
+ // Now the gc.result has the name the call has previously had. | |
+ GCResult->takeName(CS.getInstruction()); | |
+ | |
// Replace all uses with the new call | |
CS.getInstruction()->replaceAllUsesWith(GCResult); | |
} | |
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | |
index 2bb0918..f8d6015 100644 | |
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | |
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | |
@@ -2467,8 +2467,10 @@ static bool shouldRewriteStatepointsIn(Function &F) { | |
const char *FunctionGCName = F.getGC(); | |
const StringRef StatepointExampleName("statepoint-example"); | |
const StringRef CoreCLRName("coreclr"); | |
+ const StringRef V8GCName("v8-gc"); | |
return (StatepointExampleName == FunctionGCName) || | |
- (CoreCLRName == FunctionGCName); | |
+ (CoreCLRName == FunctionGCName) || | |
+ (V8GCName == FunctionGCName); | |
} else | |
return false; | |
} |