v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine.
assembler.cc
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

35 #include "assembler.h"
36 
37 #include <cmath>
38 #include "api.h"
39 #include "builtins.h"
40 #include "counters.h"
41 #include "cpu.h"
42 #include "debug.h"
43 #include "deoptimizer.h"
44 #include "execution.h"
45 #include "ic.h"
46 #include "isolate-inl.h"
47 #include "jsregexp.h"
48 #include "lazy-instance.h"
49 #include "platform.h"
50 #include "regexp-macro-assembler.h"
51 #include "regexp-stack.h"
52 #include "runtime.h"
53 #include "serialize.h"
54 #include "store-buffer-inl.h"
55 #include "stub-cache.h"
56 #include "token.h"
57 
#if V8_TARGET_ARCH_IA32
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips-inl.h"
#else
#error "Unknown architecture."
#endif

71 
72 // Include native regexp-macro-assembler.
73 #ifndef V8_INTERPRETED_REGEXP
74 #if V8_TARGET_ARCH_IA32
76 #elif V8_TARGET_ARCH_X64
78 #elif V8_TARGET_ARCH_ARM64
80 #elif V8_TARGET_ARCH_ARM
82 #elif V8_TARGET_ARCH_MIPS
84 #else // Unknown architecture.
85 #error "Unknown architecture."
86 #endif // Target architecture.
87 #endif // V8_INTERPRETED_REGEXP

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Common double constants.

struct DoubleConstant BASE_EMBEDDED {
  double min_int;
  double one_half;
  double minus_one_half;
  double minus_zero;
  double zero;
  double uint8_max_value;
  double negative_infinity;
  double canonical_non_hole_nan;
  double the_hole_nan;
  double uint32_bias;
};

static DoubleConstant double_constants;

const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";

static bool math_exp_data_initialized = false;
static Mutex* math_exp_data_mutex = NULL;
static double* math_exp_constants_array = NULL;
static double* math_exp_log_table_array = NULL;

// -----------------------------------------------------------------------------
// Implementation of AssemblerBase

AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
    : isolate_(isolate),
      jit_cookie_(0),
      enabled_cpu_features_(0),
      emit_debug_code_(FLAG_debug_code),
      predictable_code_size_(false) {
  if (FLAG_mask_constants_with_cookie && isolate != NULL) {
    jit_cookie_ = isolate->random_number_generator()->NextInt();
  }
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;
      if (isolate->assembler_spare_buffer() != NULL) {
        buffer = isolate->assembler_spare_buffer();
        isolate->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
    own_buffer_ = true;
  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    own_buffer_ = false;
  }
  buffer_ = static_cast<byte*>(buffer);
  buffer_size_ = buffer_size;

  pc_ = buffer_;
}


AssemblerBase::~AssemblerBase() {
  if (own_buffer_) {
    if (isolate() != NULL &&
        isolate()->assembler_spare_buffer() == NULL &&
        buffer_size_ == kMinimalBufferSize) {
      isolate()->set_assembler_spare_buffer(buffer_);
    } else {
      DeleteArray(buffer_);
    }
  }
}


// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope

PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
                                                   int expected_size)
    : assembler_(assembler),
      expected_size_(expected_size),
      start_offset_(assembler->pc_offset()),
      old_value_(assembler->predictable_code_size()) {
  assembler_->set_predictable_code_size(true);
}


PredictableCodeSizeScope::~PredictableCodeSizeScope() {
  // TODO(svenpanne) Remove the 'if' when everything works.
  if (expected_size_ >= 0) {
    CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
  }
  assembler_->set_predictable_code_size(old_value_);
}


// -----------------------------------------------------------------------------
// Implementation of CpuFeatureScope

#ifdef DEBUG
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
    : assembler_(assembler) {
  ASSERT(CpuFeatures::IsSafeForSnapshot(f));
  old_enabled_ = assembler_->enabled_cpu_features();
  uint64_t mask = static_cast<uint64_t>(1) << f;
  // TODO(svenpanne) This special case below doesn't belong here!
#if V8_TARGET_ARCH_ARM
  // ARMv7 is implied by VFP3.
  if (f == VFP3) {
    mask |= static_cast<uint64_t>(1) << ARMv7;
  }
#endif
  assembler_->set_enabled_cpu_features(old_enabled_ | mask);
}


CpuFeatureScope::~CpuFeatureScope() {
  assembler_->set_enabled_cpu_features(old_enabled_);
}
#endif


// -----------------------------------------------------------------------------
// Implementation of PlatformFeatureScope

PlatformFeatureScope::PlatformFeatureScope(CpuFeature f)
    : old_cross_compile_(CpuFeatures::cross_compile_) {
  // CpuFeatures is a global singleton, therefore this is only safe in
  // single threaded code.
  ASSERT(Serializer::enabled());
  uint64_t mask = static_cast<uint64_t>(1) << f;
  CpuFeatures::cross_compile_ |= mask;
}


PlatformFeatureScope::~PlatformFeatureScope() {
  CpuFeatures::cross_compile_ = old_cross_compile_;
}


// -----------------------------------------------------------------------------
// Implementation of Label

int Label::pos() const {
  if (pos_ < 0) return -pos_ - 1;
  if (pos_ > 0) return pos_ - 1;
  UNREACHABLE();
  return 0;
}
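
// Illustrative note, not part of the original source: Label encodes its
// binding state in the sign of pos_ (as documented for the Label class in
// assembler.h) -- pos_ < 0 is a bound label storing -(position + 1),
// pos_ > 0 is a linked label storing position + 1, and pos_ == 0 means
// unused. Both branches above strip the sign and the +1 bias to recover
// the position.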


// -----------------------------------------------------------------------------
// Implementation of RelocInfoWriter and RelocIterator
//
// Relocation information is written backwards in memory, from high addresses
// towards low addresses, byte by byte. Therefore, in the encodings listed
// below, the first byte listed is at the highest address, and successive
// bytes in the record are at progressively lower addresses.
//
// Encoding
//
// The most common modes are given single-byte encodings. Also, it is
// easy to identify the type of reloc info and skip unwanted modes in
// an iteration.
//
// The encoding relies on the fact that there are fewer than 14
// different relocation modes using standard non-compact encoding.
//
// The first byte of a relocation record has a tag in its low 2 bits:
// Here are the record schemes, depending on the low tag and optional higher
// tags.
//
// Low tag:
//   00: embedded_object:      [6-bit pc delta] 00
//
//   01: code_target:          [6-bit pc delta] 01
//
//   10: short_data_record:    [6-bit pc delta] 10 followed by
//                             [6-bit data delta] [2-bit data type tag]
//
//   11: long_record           [2-bit high tag][4 bit middle_tag] 11
//                             followed by variable data depending on type.
//
// 2-bit data type tags, used in short_data_record and data_jump long_record:
//   code_target_with_id: 00
//   position:            01
//   statement_position:  10
//   comment:             11 (not used in short_data_record)
//
// Long record format:
//   4-bit middle_tag:
//     0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
//       (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
//        and is between 0000 and 1100)
//       The format is:
//                             00 [4 bit middle_tag] 11 followed by
//                             00 [6 bit pc delta]
//
//     1101: constant or veneer pool. Used only on ARM and ARM64 for now.
//       The format is:        [2-bit sub-type] 1101 11
//                             signed int (size of the pool).
//       The 2-bit sub-types are:
//         00: constant pool
//         01: veneer pool
//     1110: long_data_record
//       The format is:        [2-bit data_type_tag] 1110 11
//                             signed intptr_t, lowest byte written first
//                             (except data_type code_target_with_id, which
//                             is followed by a signed int, not intptr_t.)
//
//     1111: long_pc_jump
//       The format is:
//         pc-jump:            00 1111 11,
//                             00 [6 bits pc delta]
//       or
//         pc-jump (variable length):
//                             01 1111 11,
//                             [7 bits data] 0
//                                ...
//                             [7 bits data] 1
//               (Bits 6..31 of pc delta, with leading zeroes
//                dropped, and last non-zero chunk tagged with 1.)
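
// Worked example, illustrative and not part of the original source: a
// CODE_TARGET whose pc advanced 5 bytes since the last record fits the
// single-byte scheme above -- [6-bit pc delta = 000101][low tag = 01],
// i.e. the byte 0x15. A short_data_record such as a source position with
// data delta -1 emits two bytes: the tagged pc-delta byte [pc delta][10],
// then [111111][01], the 6-bit two's-complement data delta followed by
// the 2-bit position type tag.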


#ifdef DEBUG
const int kMaxStandardNonCompactModes = 14;
#endif

const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kExtraTagBits = 4;
const int kLocatableTypeTagBits = 2;
const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;

const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
const int kLocatableTag = 2;
const int kDefaultTag = 3;

const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;

const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;

const int kVariableLengthPCJumpTopTag = 1;
const int kChunkBits = 7;
const int kChunkMask = (1 << kChunkBits) - 1;
const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;


const int kDataJumpExtraTag = kPCJumpExtraTag - 1;

const int kCodeWithIdTag = 0;
const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;

const int kPoolExtraTag = kPCJumpExtraTag - 2;
const int kConstPoolTag = 0;
const int kVeneerPoolTag = 1;


uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
  // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
  // Otherwise write a variable length PC jump for the bits that do
  // not fit in the kSmallPCDeltaBits bits.
  if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
  WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
  uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
  ASSERT(pc_jump > 0);
  // Write kChunkBits size chunks of the pc_jump.
  for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
    byte b = pc_jump & kChunkMask;
    *--pos_ = b << kLastChunkTagBits;
  }
  // Tag the last chunk so it can be identified.
  *pos_ = *pos_ | kLastChunkTag;
  // Return the remaining kSmallPCDeltaBits of the pc_delta.
  return pc_delta & kSmallPCDeltaMask;
}
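
// Worked example, illustrative and not part of the original source: for
// pc_delta = 4660 (0x1234), the low 6 bits (110100) are returned to the
// caller and the rest, pc_jump = 4660 >> 6 = 72, is written as a single
// 7-bit chunk: the byte (72 << 1) | kLastChunkTag = 0x91, preceded (at the
// next higher address) by the extra-tag byte 01 1111 11 announcing a
// variable-length pc-jump.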


void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
  // Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
  pc_delta = WriteVariableLengthPCJump(pc_delta);
  *--pos_ = pc_delta << kTagBits | tag;
}


void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
  *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
}


void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
  *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
                             extra_tag << kTagBits |
                             kDefaultTag);
}
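
// Worked example, illustrative and not part of the original source: a veneer
// pool record calls WriteExtraTag(kPoolExtraTag, kVeneerPoolTag), so
// top_tag = 01, extra_tag = 1101 and the low tag is 11, yielding the byte
// 01 1101 11 (0x77) -- exactly the "[2-bit sub-type] 1101 11" layout in the
// encoding comment above.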


void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
  // Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
  pc_delta = WriteVariableLengthPCJump(pc_delta);
  WriteExtraTag(extra_tag, 0);
  *--pos_ = pc_delta;
}


void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
  WriteExtraTag(kDataJumpExtraTag, top_tag);
  for (int i = 0; i < kIntSize; i++) {
    *--pos_ = static_cast<byte>(data_delta);
    // Signed right shift is arithmetic shift. Tested in test-utils.cc.
    data_delta = data_delta >> kBitsPerByte;
  }
}


void RelocInfoWriter::WriteExtraTaggedPoolData(int data, int pool_type) {
  WriteExtraTag(kPoolExtraTag, pool_type);
  for (int i = 0; i < kIntSize; i++) {
    *--pos_ = static_cast<byte>(data);
    // Signed right shift is arithmetic shift. Tested in test-utils.cc.
    data = data >> kBitsPerByte;
  }
}


void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
  WriteExtraTag(kDataJumpExtraTag, top_tag);
  for (int i = 0; i < kIntptrSize; i++) {
    *--pos_ = static_cast<byte>(data_delta);
    // Signed right shift is arithmetic shift. Tested in test-utils.cc.
    data_delta = data_delta >> kBitsPerByte;
  }
}


void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
  byte* begin_pos = pos_;
#endif
  ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
  ASSERT(rinfo->pc() - last_pc_ >= 0);
  ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
         <= kMaxStandardNonCompactModes);
  // Use unsigned delta-encoding for pc.
  uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
  RelocInfo::Mode rmode = rinfo->rmode();

  // The two most common modes are given small tags, and usually fit in a byte.
  if (rmode == RelocInfo::EMBEDDED_OBJECT) {
    WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
  } else if (rmode == RelocInfo::CODE_TARGET) {
    WriteTaggedPC(pc_delta, kCodeTargetTag);
    ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
  } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    // Use signed delta-encoding for id.
    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
    int id_delta = static_cast<int>(rinfo->data()) - last_id_;
    // Check if delta is small enough to fit in a tagged byte.
    if (is_intn(id_delta, kSmallDataBits)) {
      WriteTaggedPC(pc_delta, kLocatableTag);
      WriteTaggedData(id_delta, kCodeWithIdTag);
    } else {
      // Otherwise, use costly encoding.
      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
      WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
    }
    last_id_ = static_cast<int>(rinfo->data());
  } else if (RelocInfo::IsPosition(rmode)) {
    // Use signed delta-encoding for position.
    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
    int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
    int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
                                                      : kStatementPositionTag;
    // Check if delta is small enough to fit in a tagged byte.
    if (is_intn(pos_delta, kSmallDataBits)) {
      WriteTaggedPC(pc_delta, kLocatableTag);
      WriteTaggedData(pos_delta, pos_type_tag);
    } else {
      // Otherwise, use costly encoding.
      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
      WriteExtraTaggedIntData(pos_delta, pos_type_tag);
    }
    last_position_ = static_cast<int>(rinfo->data());
  } else if (RelocInfo::IsComment(rmode)) {
    // Comments are normally not generated, so we use the costly encoding.
    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
    WriteExtraTaggedData(rinfo->data(), kCommentTag);
    ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
  } else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
    WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
                             RelocInfo::IsConstPool(rmode) ? kConstPoolTag
                                                           : kVeneerPoolTag);
  } else {
    ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
    int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
    // For all other modes we simply use the mode as the extra tag.
    // None of these modes need a data component.
    ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
    WriteExtraTaggedPC(pc_delta, saved_mode);
  }
  last_pc_ = rinfo->pc();
#ifdef DEBUG
  ASSERT(begin_pos - pos_ <= kMaxSize);
#endif
}


inline int RelocIterator::AdvanceGetTag() {
  return *--pos_ & kTagMask;
}


inline int RelocIterator::GetExtraTag() {
  return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1);
}


inline int RelocIterator::GetTopTag() {
  return *pos_ >> (kTagBits + kExtraTagBits);
}


inline void RelocIterator::ReadTaggedPC() {
  rinfo_.pc_ += *pos_ >> kTagBits;
}


inline void RelocIterator::AdvanceReadPC() {
  rinfo_.pc_ += *--pos_;
}


void RelocIterator::AdvanceReadId() {
  int x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
  }
  last_id_ += x;
  rinfo_.data_ = last_id_;
}


void RelocIterator::AdvanceReadPoolData() {
  int x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
  }
  rinfo_.data_ = x;
}


void RelocIterator::AdvanceReadPosition() {
  int x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
  }
  last_position_ += x;
  rinfo_.data_ = last_position_;
}


void RelocIterator::AdvanceReadData() {
  intptr_t x = 0;
  for (int i = 0; i < kIntptrSize; i++) {
    x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
  }
  rinfo_.data_ = x;
}


void RelocIterator::AdvanceReadVariableLengthPCJump() {
  // Read the 32-kSmallPCDeltaBits most significant bits of the
  // pc jump in kChunkBits bit chunks and shift them into place.
  // Stop when the last chunk is encountered.
  uint32_t pc_jump = 0;
  for (int i = 0; i < kIntSize; i++) {
    byte pc_jump_part = *--pos_;
    pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
    if ((pc_jump_part & kLastChunkTagMask) == 1) break;
  }
  // The least significant kSmallPCDeltaBits bits will be added
  // later.
  rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
}


inline int RelocIterator::GetLocatableTypeTag() {
  return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
}


inline void RelocIterator::ReadTaggedId() {
  int8_t signed_b = *pos_;
  // Signed right shift is arithmetic shift. Tested in test-utils.cc.
  last_id_ += signed_b >> kLocatableTypeTagBits;
  rinfo_.data_ = last_id_;
}


inline void RelocIterator::ReadTaggedPosition() {
  int8_t signed_b = *pos_;
  // Signed right shift is arithmetic shift. Tested in test-utils.cc.
  last_position_ += signed_b >> kLocatableTypeTagBits;
  rinfo_.data_ = last_position_;
}


static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
  ASSERT(tag == kNonstatementPositionTag ||
         tag == kStatementPositionTag);
  return (tag == kNonstatementPositionTag) ?
         RelocInfo::POSITION :
         RelocInfo::STATEMENT_POSITION;
}


void RelocIterator::next() {
  ASSERT(!done());
  // Basically, do the opposite of RelocInfoWriter::Write.
  // Reading of data is as far as possible avoided for unwanted modes,
  // but we must always update the pc.
  //
  // We exit this loop by returning when we find a mode we want.
  while (pos_ > end_) {
    int tag = AdvanceGetTag();
    if (tag == kEmbeddedObjectTag) {
      ReadTaggedPC();
      if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
    } else if (tag == kCodeTargetTag) {
      ReadTaggedPC();
      if (SetMode(RelocInfo::CODE_TARGET)) return;
    } else if (tag == kLocatableTag) {
      ReadTaggedPC();
      Advance();
      int locatable_tag = GetLocatableTypeTag();
      if (locatable_tag == kCodeWithIdTag) {
        if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
          ReadTaggedId();
          return;
        }
      } else {
        // Compact encoding is never used for comments,
        // so it must be a position.
        ASSERT(locatable_tag == kNonstatementPositionTag ||
               locatable_tag == kStatementPositionTag);
        if (mode_mask_ & RelocInfo::kPositionMask) {
          ReadTaggedPosition();
          if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
        }
      }
    } else {
      ASSERT(tag == kDefaultTag);
      int extra_tag = GetExtraTag();
      if (extra_tag == kPCJumpExtraTag) {
        if (GetTopTag() == kVariableLengthPCJumpTopTag) {
          AdvanceReadVariableLengthPCJump();
        } else {
          AdvanceReadPC();
        }
      } else if (extra_tag == kDataJumpExtraTag) {
        int locatable_tag = GetTopTag();
        if (locatable_tag == kCodeWithIdTag) {
          if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
            AdvanceReadId();
            return;
          }
          Advance(kIntSize);
        } else if (locatable_tag != kCommentTag) {
          ASSERT(locatable_tag == kNonstatementPositionTag ||
                 locatable_tag == kStatementPositionTag);
          if (mode_mask_ & RelocInfo::kPositionMask) {
            AdvanceReadPosition();
            if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
          } else {
            Advance(kIntSize);
          }
        } else {
          ASSERT(locatable_tag == kCommentTag);
          if (SetMode(RelocInfo::COMMENT)) {
            AdvanceReadData();
            return;
          }
          Advance(kIntptrSize);
        }
      } else if (extra_tag == kPoolExtraTag) {
        int pool_type = GetTopTag();
        ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
        RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
            RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
        if (SetMode(rmode)) {
          AdvanceReadPoolData();
          return;
        }
        Advance(kIntSize);
      } else {
        AdvanceReadPC();
        int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
        if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
      }
    }
  }
  if (code_age_sequence_ != NULL) {
    byte* old_code_age_sequence = code_age_sequence_;
    code_age_sequence_ = NULL;
    if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
      rinfo_.data_ = 0;
      rinfo_.pc_ = old_code_age_sequence;
      return;
    }
  }
  done_ = true;
}


RelocIterator::RelocIterator(Code* code, int mode_mask) {
  rinfo_.host_ = code;
  rinfo_.pc_ = code->instruction_start();
  rinfo_.data_ = 0;
  // Relocation info is read backwards.
  pos_ = code->relocation_start() + code->relocation_size();
  end_ = code->relocation_start();
  done_ = false;
  mode_mask_ = mode_mask;
  last_id_ = 0;
  last_position_ = 0;
  byte* sequence = code->FindCodeAgeSequence();
  if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
    code_age_sequence_ = sequence;
  } else {
    code_age_sequence_ = NULL;
  }
  if (mode_mask_ == 0) pos_ = end_;
  next();
}


RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
  rinfo_.pc_ = desc.buffer;
  rinfo_.data_ = 0;
  // Relocation info is read backwards.
  pos_ = desc.buffer + desc.buffer_size;
  end_ = pos_ - desc.reloc_size;
  done_ = false;
  mode_mask_ = mode_mask;
  last_id_ = 0;
  last_position_ = 0;
  code_age_sequence_ = NULL;
  if (mode_mask_ == 0) pos_ = end_;
  next();
}
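
// Usage sketch, illustrative and not part of the original source: walking
// the relocation entries of a Code object, restricted by a mode mask so
// that next() can skip everything else cheaply.
//
//   int mask = RelocInfo::kCodeTargetMask |
//              RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
//   for (RelocIterator it(code, mask); !it.done(); it.next()) {
//     RelocInfo* info = it.rinfo();
//     // Inspect info->rmode(), info->pc(), info->data()...
//   }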


// -----------------------------------------------------------------------------
// Implementation of RelocInfo


#ifdef DEBUG
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
  // Ensure there are no code targets or embedded objects present in the
  // deoptimization entries, they would require relocation after code
  // generation.
  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                  RelocInfo::ModeMask(RelocInfo::CELL) |
                  RelocInfo::kApplyMask;
  RelocIterator it(desc, mode_mask);
  return !it.done();
}
#endif


#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
  switch (rmode) {
    case RelocInfo::NONE32:
      return "no reloc 32";
    case RelocInfo::NONE64:
      return "no reloc 64";
    case RelocInfo::EMBEDDED_OBJECT:
      return "embedded object";
    case RelocInfo::CONSTRUCT_CALL:
      return "code target (js construct call)";
    case RelocInfo::DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
      UNREACHABLE();
#endif
      return "debug break";
    case RelocInfo::CODE_TARGET:
      return "code target";
    case RelocInfo::CODE_TARGET_WITH_ID:
      return "code target with id";
    case RelocInfo::CELL:
      return "property cell";
    case RelocInfo::RUNTIME_ENTRY:
      return "runtime entry";
    case RelocInfo::JS_RETURN:
      return "js return";
    case RelocInfo::COMMENT:
      return "comment";
    case RelocInfo::POSITION:
      return "position";
    case RelocInfo::STATEMENT_POSITION:
      return "statement position";
    case RelocInfo::EXTERNAL_REFERENCE:
      return "external reference";
    case RelocInfo::INTERNAL_REFERENCE:
      return "internal reference";
    case RelocInfo::CONST_POOL:
      return "constant pool";
    case RelocInfo::VENEER_POOL:
      return "veneer pool";
    case RelocInfo::DEBUG_BREAK_SLOT:
#ifndef ENABLE_DEBUGGER_SUPPORT
      UNREACHABLE();
#endif
      return "debug break slot";
    case RelocInfo::CODE_AGE_SEQUENCE:
      return "code_age_sequence";
    case RelocInfo::NUMBER_OF_MODES:
      UNREACHABLE();
      return "number_of_modes";
  }
  return "unknown relocation type";
}


void RelocInfo::Print(Isolate* isolate, FILE* out) {
  PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
  if (IsComment(rmode_)) {
    PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
  } else if (rmode_ == EMBEDDED_OBJECT) {
    PrintF(out, " (");
    target_object()->ShortPrint(out);
    PrintF(out, ")");
  } else if (rmode_ == EXTERNAL_REFERENCE) {
    ExternalReferenceEncoder ref_encoder(isolate);
    PrintF(out, " (%s) (%p)",
           ref_encoder.NameOfAddress(target_reference()),
           target_reference());
  } else if (IsCodeTarget(rmode_)) {
    Code* code = Code::GetCodeFromTargetAddress(target_address());
    PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
           target_address());
    if (rmode_ == CODE_TARGET_WITH_ID) {
      PrintF(out, " (id=%d)", static_cast<int>(data_));
    }
  } else if (IsPosition(rmode_)) {
    PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
  } else if (IsRuntimeEntry(rmode_) &&
             isolate->deoptimizer_data() != NULL) {
    // Deoptimization bailouts are stored as runtime entries.
    int id = Deoptimizer::GetDeoptimizationId(
        isolate, target_address(), Deoptimizer::EAGER);
    if (id != Deoptimizer::kNotDeoptimizationEntry) {
      PrintF(out, " (deoptimization bailout %d)", id);
    }
  }

  PrintF(out, "\n");
}
#endif  // ENABLE_DISASSEMBLER


#ifdef VERIFY_HEAP
void RelocInfo::Verify() {
  switch (rmode_) {
    case EMBEDDED_OBJECT:
      Object::VerifyPointer(target_object());
      break;
    case CELL:
      Object::VerifyPointer(target_cell());
      break;
    case DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
      UNREACHABLE();
      break;
#endif
    case CONSTRUCT_CALL:
    case CODE_TARGET_WITH_ID:
    case CODE_TARGET: {
      // convert inline target address to code object
      Address addr = target_address();
      CHECK(addr != NULL);
      // Check that we can find the right code object.
      Code* code = Code::GetCodeFromTargetAddress(addr);
      Object* found = code->GetIsolate()->FindCodeObject(addr);
      CHECK(found->IsCode());
      CHECK(code->address() == HeapObject::cast(found)->address());
      break;
    }
    case RUNTIME_ENTRY:
    case JS_RETURN:
    case COMMENT:
    case POSITION:
    case STATEMENT_POSITION:
    case EXTERNAL_REFERENCE:
    case INTERNAL_REFERENCE:
    case CONST_POOL:
    case VENEER_POOL:
    case DEBUG_BREAK_SLOT:
    case NONE32:
    case NONE64:
      break;
    case NUMBER_OF_MODES:
      UNREACHABLE();
      break;
    case CODE_AGE_SEQUENCE:
      ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
      break;
  }
}
#endif  // VERIFY_HEAP


// -----------------------------------------------------------------------------
// Implementation of ExternalReference

void ExternalReference::SetUp() {
  double_constants.min_int = kMinInt;
  double_constants.one_half = 0.5;
  double_constants.minus_one_half = -0.5;
  double_constants.minus_zero = -0.0;
  double_constants.uint8_max_value = 255;
  double_constants.zero = 0.0;
  double_constants.canonical_non_hole_nan = OS::nan_value();
  double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
  double_constants.negative_infinity = -V8_INFINITY;
  double_constants.uint32_bias =
      static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;

  math_exp_data_mutex = new Mutex();
}


void ExternalReference::InitializeMathExpData() {
  // Early return?
  if (math_exp_data_initialized) return;

  LockGuard<Mutex> lock_guard(math_exp_data_mutex);
  if (!math_exp_data_initialized) {
    // If this is changed, generated code must be adapted too.
    const int kTableSizeBits = 11;
    const int kTableSize = 1 << kTableSizeBits;
    const double kTableSizeDouble = static_cast<double>(kTableSize);

    math_exp_constants_array = new double[9];
    // Input values smaller than this always return 0.
    math_exp_constants_array[0] = -708.39641853226408;
    // Input values larger than this always return +Infinity.
    math_exp_constants_array[1] = 709.78271289338397;
    math_exp_constants_array[2] = V8_INFINITY;
    // The rest is black magic. Do not attempt to understand it. It is
    // loosely based on the "expd" function published at:
    // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
    const double constant3 = (1 << kTableSizeBits) / std::log(2.0);
    math_exp_constants_array[3] = constant3;
    math_exp_constants_array[4] =
        static_cast<double>(static_cast<int64_t>(3) << 51);
    math_exp_constants_array[5] = 1 / constant3;
    math_exp_constants_array[6] = 3.0000000027955394;
    math_exp_constants_array[7] = 0.16666666685227835;
    math_exp_constants_array[8] = 1;

    math_exp_log_table_array = new double[kTableSize];
    for (int i = 0; i < kTableSize; i++) {
      double value = std::pow(2, i / kTableSizeDouble);
      uint64_t bits = BitCast<uint64_t, double>(value);
      bits &= (static_cast<uint64_t>(1) << 52) - 1;
      double mantissa = BitCast<double, uint64_t>(bits);
      math_exp_log_table_array[i] = mantissa;
    }

    math_exp_data_initialized = true;
  }
}


void ExternalReference::TearDownMathExpData() {
  delete[] math_exp_constants_array;
  delete[] math_exp_log_table_array;
  delete math_exp_data_mutex;
}


ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
    : address_(Redirect(isolate, Builtins::c_function_address(id))) {}


ExternalReference::ExternalReference(
    ApiFunction* fun,
    Type type = ExternalReference::BUILTIN_CALL,
    Isolate* isolate = NULL)
    : address_(Redirect(isolate, fun->address(), type)) {}


ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
    : address_(isolate->builtins()->builtin_address(name)) {}


ExternalReference::ExternalReference(Runtime::FunctionId id,
                                     Isolate* isolate)
    : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}


ExternalReference::ExternalReference(const Runtime::Function* f,
                                     Isolate* isolate)
    : address_(Redirect(isolate, f->entry)) {}


ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
  return ExternalReference(isolate);
}


ExternalReference::ExternalReference(const IC_Utility& ic_utility,
                                     Isolate* isolate)
    : address_(Redirect(isolate, ic_utility.address())) {}

#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address,
                                     Isolate* isolate)
    : address_(debug_address.address(isolate)) {}
#endif

ExternalReference::ExternalReference(StatsCounter* counter)
    : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}


ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
    : address_(isolate->get_address_from_id(id)) {}


ExternalReference::ExternalReference(const SCTableReference& table_ref)
    : address_(table_ref.address()) {}


ExternalReference ExternalReference::
    incremental_marking_record_write_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}


ExternalReference ExternalReference::
    store_buffer_overflow_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}


ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
}


ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
  return
      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
}


ExternalReference ExternalReference::out_of_memory_function(Isolate* isolate) {
  return
      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::OutOfMemory)));
}


ExternalReference ExternalReference::delete_handle_scope_extensions(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}


ExternalReference ExternalReference::get_date_field_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
}


ExternalReference ExternalReference::get_make_code_young_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}


ExternalReference ExternalReference::get_mark_code_as_executed_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
}


ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
  return ExternalReference(isolate->date_cache()->stamp_address());
}


ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
  return ExternalReference(isolate->stress_deopt_count_address());
}


ExternalReference ExternalReference::new_deoptimizer_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}


ExternalReference ExternalReference::compute_output_frames_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}


ExternalReference ExternalReference::log_enter_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}


ExternalReference ExternalReference::log_leave_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}


ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
  return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}


ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
    Isolate* isolate) {
  return ExternalReference(
      isolate->keyed_lookup_cache()->field_offsets_address());
}


ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->roots_array_start());
}


ExternalReference ExternalReference::allocation_sites_list_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->allocation_sites_list_address());
}


ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}


ExternalReference ExternalReference::address_of_real_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}


ExternalReference ExternalReference::address_of_regexp_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->limit_address());
}


ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceStart());
}


ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
  return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
}


ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
  return ExternalReference(reinterpret_cast<Address>(
      isolate->heap()->NewSpaceMask()));
}


ExternalReference ExternalReference::new_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::heap_always_allocate_scope_depth(
    Isolate* isolate) {
  Heap* heap = isolate->heap();
  return ExternalReference(heap->always_allocate_scope_depth_address());
}


ExternalReference ExternalReference::new_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldPointerSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldPointerSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::old_data_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldDataSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::old_data_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldDataSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::
    new_space_high_promotion_mode_active_address(Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
}


ExternalReference ExternalReference::handle_scope_level_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_level_address(isolate));
}


ExternalReference ExternalReference::handle_scope_next_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_next_address(isolate));
}


ExternalReference ExternalReference::handle_scope_limit_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_limit_address(isolate));
}


ExternalReference ExternalReference::scheduled_exception_address(
    Isolate* isolate) {
  return ExternalReference(isolate->scheduled_exception_address());
}


ExternalReference ExternalReference::address_of_pending_message_obj(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_message_obj_address());
}


ExternalReference ExternalReference::address_of_has_pending_message(
    Isolate* isolate) {
  return ExternalReference(isolate->has_pending_message_address());
}


ExternalReference ExternalReference::address_of_pending_message_script(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_message_script_address());
}


ExternalReference ExternalReference::address_of_min_int() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}


ExternalReference ExternalReference::address_of_one_half() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}


ExternalReference ExternalReference::address_of_minus_one_half() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_one_half));
}


ExternalReference ExternalReference::address_of_minus_zero() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_zero));
}


ExternalReference ExternalReference::address_of_zero() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
}


ExternalReference ExternalReference::address_of_uint8_max_value() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint8_max_value));
}


ExternalReference ExternalReference::address_of_negative_infinity() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.negative_infinity));
}


ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
}


ExternalReference ExternalReference::address_of_the_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.the_hole_nan));
}


ExternalReference ExternalReference::address_of_uint32_bias() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint32_bias));
}


#ifndef V8_INTERPRETED_REGEXP

ExternalReference ExternalReference::re_check_stack_guard_state(
    Isolate* isolate) {
  Address function;
#if V8_TARGET_ARCH_X64
  function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
  function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM64
  function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
  function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
  function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
  UNREACHABLE();
#endif
  return ExternalReference(Redirect(isolate, function));
}


ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
}

ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}


ExternalReference ExternalReference::re_word_character_map() {
  return ExternalReference(
      NativeRegExpMacroAssembler::word_character_map_address());
}

ExternalReference ExternalReference::address_of_static_offsets_vector(
    Isolate* isolate) {
  return ExternalReference(
      reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
}

ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->regexp_stack()->memory_address());
}

ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->memory_size_address());
}

#endif  // V8_INTERPRETED_REGEXP


ExternalReference ExternalReference::math_log_double_function(
    Isolate* isolate) {
  typedef double (*d2d)(double x);
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(static_cast<d2d>(std::log)),
                                    BUILTIN_FP_CALL));
}


ExternalReference ExternalReference::math_exp_constants(int constant_index) {
  ASSERT(math_exp_data_initialized);
  return ExternalReference(
      reinterpret_cast<void*>(math_exp_constants_array + constant_index));
}


ExternalReference ExternalReference::math_exp_log_table() {
  ASSERT(math_exp_data_initialized);
  return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
}


ExternalReference ExternalReference::page_flags(Page* page) {
  return ExternalReference(reinterpret_cast<Address>(page) +
                           MemoryChunk::kFlagsOffset);
}


ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
  return ExternalReference(entry);
}


double power_helper(double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) {
    return power_double_int(x, y_int);  // Returns 1 if exponent is 0.
  }
  if (y == 0.5) {
    return (std::isinf(x)) ? V8_INFINITY
                           : fast_sqrt(x + 0.0);  // Convert -0 to +0.
  }
  if (y == -0.5) {
    return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0);  // Convert -0 to +0.
  }
  return power_double_double(x, y);
}


// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
// S. Warren, Jr., figure 11-6, page 213.
double power_double_int(double x, int y) {
  double m = (y < 0) ? 1 / x : x;
  unsigned n = (y < 0) ? -y : y;
  double p = 1;
  while (n != 0) {
    if ((n & 1) != 0) p *= m;
    m *= m;
    if ((n & 2) != 0) p *= m;
    m *= m;
    n >>= 2;
  }
  return p;
}
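
// Worked example, illustrative and not part of the original source: y = 13
// (binary 1101). The loop above consumes two exponent bits per iteration:
//   n = 1101: bit 0 set -> p = x;   m = x^2; bit 1 clear;           m = x^4
//   n =   11: bit 0 set -> p = x^5; m = x^8; bit 1 set -> p = x^13; m = x^16
//   n =    0: done, returning p = x^13 after 4 squarings and 3 multiplies.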


double power_double_double(double x, double y) {
#if defined(__MINGW64_VERSION_MAJOR) && \
    (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
  // MinGW64 has a custom implementation for pow. This handles certain
  // special cases that are different.
  if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
    double f;
    if (std::modf(y, &f) != 0.0) {
      return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
    }
  }

  if (x == 2.0) {
    int y_int = static_cast<int>(y);
    if (y == y_int) {
      return std::ldexp(1.0, y_int);
    }
  }
#endif

  // The checks for special cases can be dropped in ia32 because it has already
  // been done in generated code before bailing out here.
  if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
    return OS::nan_value();
  }
  return std::pow(x, y);
}


ExternalReference ExternalReference::power_double_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(power_double_double),
                                    BUILTIN_FP_FP_CALL));
}


ExternalReference ExternalReference::power_double_int_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(power_double_int),
                                    BUILTIN_FP_INT_CALL));
}


bool EvalComparison(Token::Value op, double op1, double op2) {
  ASSERT(Token::IsCompareOp(op));
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT: return (op1 == op2);
    case Token::NE: return (op1 != op2);
    case Token::LT: return (op1 < op2);
    case Token::GT: return (op1 > op2);
    case Token::LTE: return (op1 <= op2);
    case Token::GTE: return (op1 >= op2);
    default:
      UNREACHABLE();
      return false;
  }
}


ExternalReference ExternalReference::mod_two_doubles_operation(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(modulo),
                                    BUILTIN_FP_FP_CALL));
}


#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break(Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
}


ExternalReference ExternalReference::debug_step_in_fp_address(
    Isolate* isolate) {
  return ExternalReference(isolate->debug()->step_in_fp_addr());
}
#endif


void PositionsRecorder::RecordPosition(int pos) {
  ASSERT(pos != RelocInfo::kNoPosition);
  ASSERT(pos >= 0);
  state_.current_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (gdbjit_lineinfo_ != NULL) {
    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
  }
#endif
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddPositionEvent(jit_handler_data_,
                                                 assembler_->pc_offset(),
                                                 pos));
}


void PositionsRecorder::RecordStatementPosition(int pos) {
  ASSERT(pos != RelocInfo::kNoPosition);
  ASSERT(pos >= 0);
  state_.current_statement_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (gdbjit_lineinfo_ != NULL) {
    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
  }
#endif
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddStatementPositionEvent(
                     jit_handler_data_,
                     assembler_->pc_offset(),
                     pos));
}


bool PositionsRecorder::WriteRecordedPositions() {
  bool written = false;

  // Write the statement position if it is different from what was written last
  // time.
  if (state_.current_statement_position != state_.written_statement_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
                                state_.current_statement_position);
    state_.written_statement_position = state_.current_statement_position;
    written = true;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (state_.current_position != state_.written_position &&
      state_.current_position != state_.written_statement_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
    state_.written_position = state_.current_position;
    written = true;
  }

  // Return whether something was written.
  return written;
}


MultiplierAndShift::MultiplierAndShift(int32_t d) {
  ASSERT(d <= -2 || 2 <= d);
  const uint32_t two31 = 0x80000000;
  uint32_t ad = Abs(d);
  uint32_t t = two31 + (uint32_t(d) >> 31);
  uint32_t anc = t - 1 - t % ad;   // Absolute value of nc.
  int32_t p = 31;                  // Init. p.
  uint32_t q1 = two31 / anc;       // Init. q1 = 2**p/|nc|.
  uint32_t r1 = two31 - q1 * anc;  // Init. r1 = rem(2**p, |nc|).
  uint32_t q2 = two31 / ad;        // Init. q2 = 2**p/|d|.
  uint32_t r2 = two31 - q2 * ad;   // Init. r2 = rem(2**p, |d|).
  uint32_t delta;
  do {
    p++;
    q1 *= 2;          // Update q1 = 2**p/|nc|.
    r1 *= 2;          // Update r1 = rem(2**p, |nc|).
    if (r1 >= anc) {  // Must be an unsigned comparison here.
      q1++;
      r1 = r1 - anc;
    }
    q2 *= 2;          // Update q2 = 2**p/|d|.
    r2 *= 2;          // Update r2 = rem(2**p, |d|).
    if (r2 >= ad) {   // Must be an unsigned comparison here.
      q2++;
      r2 = r2 - ad;
    }
    delta = ad - r2;
  } while (q1 < delta || (q1 == delta && r1 == 0));
  int32_t mul = static_cast<int32_t>(q2 + 1);
  multiplier_ = (d < 0) ? -mul : mul;
  shift_ = p - 32;
}
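
// Worked example, illustrative and not part of the original source: for
// d = 5 the loop above settles on multiplier_ = 0x66666667 and shift_ = 1,
// the classic "magic number" for division by 5, so a compiler can replace
// x / 5 with a multiply and shifts along the lines of:
//   int32_t q = static_cast<int32_t>(
//       (static_cast<int64_t>(x) * 0x66666667LL) >> (32 + 1));
//   q += static_cast<uint32_t>(x) >> 31;  // Round toward zero for x < 0.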

} }  // namespace v8::internal
byte * Address
Definition: globals.h:186
RelocIterator(Code *code, int mode_mask=-1)
Definition: assembler.cc:713
enable upcoming ES6 features enable harmony block scoping enable harmony enable harmony proxies enable harmony generators enable harmony numeric enable harmony string enable harmony math functions harmony_scoping harmony_symbols harmony_collections harmony_iteration harmony_strings harmony_scoping harmony_maths tracks arrays with only smi values Optimize object Array DOM strings and string pretenure call new trace pretenuring decisions of HAllocate instructions track fields with only smi values track fields with heap values track_fields track_fields Enables optimizations which favor memory size over execution speed use string slices optimization filter maximum number of GVN fix point iterations use function inlining use allocation folding eliminate write barriers targeting allocations in optimized code maximum source size in bytes considered for a single inlining maximum cumulative number of AST nodes considered for inlining crankshaft harvests type feedback from stub cache trace check elimination phase hydrogen tracing filter NULL
Definition: flags.cc:269
const int kMinInt
Definition: globals.h:249
const int kPCJumpExtraTag
Definition: assembler.cc:330
Isolate * isolate() const
Definition: assembler.h:62
#define CHECK_EQ(expected, value)
Definition: checks.h:252
const int kLastChunkTagBits
Definition: assembler.cc:339
void PrintF(const char *format,...)
Definition: v8utils.cc:40
static void EnterExternal(Isolate *isolate)
Definition: log.cc:1113
const int kNonstatementPositionTag
Definition: assembler.cc:347
static void ComputeOutputFrames(Deoptimizer *deoptimizer)
Definition: deoptimizer.cc:526
static HeapObject * cast(Object *obj)
static bool IsCompareOp(Value op)
Definition: token.h:220
double power_helper(double x, double y)
Definition: assembler.cc:1436
double fast_sqrt(double input)
kSerializedDataOffset Object
Definition: objects-inl.h:5016
TypeImpl< ZoneTypeConfig > Type
int int32_t
Definition: unicode.cc:47
static const int kMinimalBufferSize
Definition: assembler.h:89
static bool enabled()
Definition: serialize.h:485
RandomNumberGenerator * random_number_generator()
Definition: isolate-inl.h:75
#define ASSERT(condition)
Definition: checks.h:329
static void StoreBufferOverflow(Isolate *isolate)
static bool IsSafeForSnapshot(CpuFeature f)
Definition: assembler-arm.h:78
static const int kFlagsOffset
Definition: spaces.h:655
const int kTagBits
Definition: assembler.cc:319
int isfinite(double x)
const int kLocatableTypeTagBits
Definition: assembler.cc:322
#define CHECK(condition)
Definition: checks.h:75
static void MarkCodeAsExecuted(byte *sequence, Isolate *isolate)
Definition: objects.cc:10636
PredictableCodeSizeScope(AssemblerBase *assembler, int expected_size)
Definition: assembler.cc:168
const Register r2
const int kCommentTag
Definition: assembler.cc:349
const QwNeonRegister q2
const int kIntSize
Definition: globals.h:263
bool is_intn(int64_t x, unsigned n)
Definition: utils.h:1102
#define V8_INFINITY
Definition: globals.h:44
int isnan(double x)
const int kTagMask
Definition: assembler.cc:320
uint8_t byte
Definition: globals.h:185
static void MakeCodeAgeSequenceYoung(byte *sequence, Isolate *isolate)
Definition: objects.cc:10631
const int kStatementPositionTag
Definition: assembler.cc:348
const uint64_t kHoleNanInt64
Definition: v8globals.h:458
static int CheckStackGuardState(Address *return_address, Code *re_code, Address re_frame)
#define UNREACHABLE()
Definition: checks.h:52
const int kSmallPCDeltaBits
Definition: assembler.cc:332
const int kVeneerPoolTag
Definition: assembler.cc:353
const int kExtraTagBits
Definition: assembler.cc:321
const int kConstPoolTag
Definition: assembler.cc:352
byte * instruction_start()
Definition: objects-inl.h:5857
const int kSmallDataBits
Definition: assembler.cc:323
const int kChunkMask
Definition: assembler.cc:338
static int CaseInsensitiveCompareUC16(Address byte_offset1, Address byte_offset2, size_t byte_length, Isolate *isolate)
static void PerformGC(Object *result, Isolate *isolate)
Definition: runtime.cc:15159
AssemblerBase(Isolate *isolate, void *buffer, int buffer_size)
Definition: assembler.cc:120
double power_double_double(double x, double y)
Definition: assembler.cc:1471
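power_double_double() above implements Math.pow semantics on top of the C library. A minimal sketch of the idea, assuming the only adjustment needed is the ECMAScript special cases where std::pow would return 1 but Math.pow must return NaN (a NaN exponent, or a base of +/-1 with an infinite exponent); the function name is hypothetical and V8's real version also carries platform-specific workarounds not shown here:

#include <cmath>
#include <limits>

// Hedged sketch of an ECMAScript-flavored pow wrapper (name hypothetical).
double PowerDoubleDoubleSketch(double x, double y) {
  // std::pow(1, NaN) and std::pow(+/-1, +/-inf) yield 1; Math.pow wants NaN.
  if (std::isnan(y) || ((x == 1.0 || x == -1.0) && std::isinf(y))) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  return std::pow(x, y);
}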
static int GetDeoptimizationId(Isolate *isolate, Address addr, BailoutType type)
Definition: deoptimizer.cc:701
void set_enabled_cpu_features(uint64_t features)
Definition: assembler.h:72
DEFINE_bool(code_comments, ...)
static const char * Kind2String(Kind kind)
Definition: objects.cc:10803
const int kChunkBits
Definition: assembler.cc:337
double modulo(double x, double y)
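modulo() backs the JavaScript '%' operator on doubles, whose semantics match C's fmod (the result takes the sign of the dividend). A minimal sketch, assuming the portable path is a plain fmod call; V8 additionally works around platform fmod quirks (e.g. on Windows) that are omitted here:

#include <cmath>

double ModuloSketch(double x, double y) {
  // JS: 5 % 3 == 2 and -5 % 3 == -2 -- exactly std::fmod's convention.
  return std::fmod(x, y);
}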
static Code * GetCodeFromTargetAddress(Address address)
Definition: objects-inl.h:4662
double power_double_int(double x, int y)
Definition: assembler.cc:1456
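power_double_int() raises a double to an integer power. The standard technique for this is exponentiation by squaring, which needs only O(log |y|) multiplications; a self-contained sketch under that assumption (name hypothetical, edge-case handling illustrative):

double PowerDoubleIntSketch(double x, int y) {
  // Handle a negative exponent by inverting the base once up front.
  double m = (y < 0) ? 1.0 / x : x;
  // Unsigned wraparound yields the correct magnitude even for INT_MIN.
  unsigned n = (y < 0) ? 0u - static_cast<unsigned>(y)
                       : static_cast<unsigned>(y);
  double p = 1.0;
  while (n != 0) {
    if ((n & 1) != 0) p *= m;  // fold in the current exponent bit
    m *= m;                    // square the base for the next bit
    n >>= 1;
  }
  return p;
}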
#define V8_PTR_PREFIX
Definition: globals.h:220
const int kBitsPerByte
Definition: globals.h:287
byte * relocation_start()
Definition: objects-inl.h:5877
#define BASE_EMBEDDED
Definition: allocation.h:68
int isinf(double x)
static int CheckStackGuardState(Address *return_address, Code *re_code, Address re_frame, int start_offset, const byte **input_start, const byte **input_end)
const QwNeonRegister q1
static Address current_limit_address(Isolate *isolate)
Definition: handles.cc:122
static Object * GetField(Object *date, Smi *index)
Definition: objects.cc:16167
static int CheckStackGuardState(Address *return_address, Code *re_code, Address re_frame)
const int kDataJumpExtraTag
Definition: assembler.cc:344
void set_predictable_code_size(bool value)
Definition: assembler.h:69
static int CheckStackGuardState(Address *return_address, Code *re_code, Address re_frame)
const Register r1
static double nan_value()
const int kSmallPCDeltaMask
Definition: assembler.cc:333
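The kTag*/kSmallPCDelta* constants in this index describe the compact relocation-info encoding: when a pc-delta is small enough, the RelocInfo writer packs it together with a mode tag into a single byte, tag in the low bits. A sketch of that layout, assuming the common 2-tag-bit split of an 8-bit byte; the *Sketch names and helper functions are illustrative, not V8 API:

typedef unsigned char byte;  // mirrors the uint8_t byte typedef above

const int kBitsPerByteSketch = 8;
const int kTagBitsSketch = 2;                          // assumed width
const int kTagMaskSketch = (1 << kTagBitsSketch) - 1;  // 0x3
const int kSmallPCDeltaBitsSketch = kBitsPerByteSketch - kTagBitsSketch;
const int kSmallPCDeltaMaskSketch = (1 << kSmallPCDeltaBitsSketch) - 1;

// Pack a small pc-delta and a 2-bit mode tag into one relocation byte.
// Deltas wider than kSmallPCDeltaBitsSketch need a variable-length
// "pc jump" record instead (compare kVariableLengthPCJumpTopTag).
byte EncodeTaggedPCSketch(int pc_delta, int tag) {
  return static_cast<byte>((pc_delta << kTagBitsSketch) |
                           (tag & kTagMaskSketch));
}

int TagOfSketch(byte b) { return b & kTagMaskSketch; }
int PCDeltaOfSketch(byte b) {
  return (b >> kTagBitsSketch) & kSmallPCDeltaMaskSketch;
}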
const int kCodeWithIdTag
Definition: assembler.cc:346
static const int kNotDeoptimizationEntry
Definition: deoptimizer.h:258
static void LeaveExternal(Isolate *isolate)
Definition: log.cc:1120
uint64_t enabled_cpu_features() const
Definition: assembler.h:71
const int kLastChunkTag
Definition: assembler.cc:341
static void RecordWriteFromCode(HeapObject *obj, Object **slot, Isolate *isolate)
static Address GrowStack(Address stack_pointer, Address *stack_top, Isolate *isolate)
T Abs(T a)
Definition: utils.h:241
const int kLastChunkTagMask
Definition: assembler.cc:340
static bool IsYoungSequence(byte *sequence)
void Print(const v8::FunctionCallbackInfo< v8::Value > &args)
#define RUNTIME_ENTRY(name, nargs, ressize)
const int kPoolExtraTag
Definition: assembler.cc:351
const int kDefaultTag
Definition: assembler.cc:328
void DeleteArray(T *array)
Definition: allocation.h:91
#define FUNCTION_ADDR(f)
Definition: globals.h:345
#define LOG_CODE_EVENT(isolate, Call)
Definition: log.h:94
name
Definition: flags.cc:505
int expected_size
const int kLocatableTag
Definition: assembler.cc:327
static int CheckStackGuardState(Address *return_address, Code *re_code, Address re_frame)
static Address current_next_address(Isolate *isolate)
Definition: handles.cc:117
bool EvalComparison(Token::Value op, double op1, double op2)
Definition: assembler.cc:1516
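EvalComparison() folds a comparison token and two doubles into a bool, letting the compiler constant-fold numeric comparisons. A self-contained sketch using a hypothetical stand-in enum for the token subset (the real function takes v8::internal::Token::Value):

// Hypothetical stand-in for the comparison subset of Token::Value.
enum class CompareOpSketch { EQ, EQ_STRICT, NE, LT, GT, LTE, GTE };

bool EvalComparisonSketch(CompareOpSketch op, double op1, double op2) {
  switch (op) {
    // On two numbers, strict and loose equality agree, hence one case.
    case CompareOpSketch::EQ:
    case CompareOpSketch::EQ_STRICT: return op1 == op2;
    case CompareOpSketch::NE:        return op1 != op2;
    case CompareOpSketch::LT:        return op1 < op2;
    case CompareOpSketch::GT:        return op1 > op2;
    case CompareOpSketch::LTE:       return op1 <= op2;
    case CompareOpSketch::GTE:       return op1 >= op2;
  }
  return false;  // unreachable for well-formed input
}

Because the sketch leans directly on the C++ operators, it inherits IEEE 754 behavior for free: every comparison involving NaN except != comes out false.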
static void OutOfMemory()
Definition: runtime.cc:15180
static Address current_level_address(Isolate *isolate)
Definition: handles.cc:112
const int kIntptrSize
Definition: globals.h:267
const int kCodeTargetTag
Definition: assembler.cc:326
static void DeleteExtensions(Isolate *isolate)
Definition: handles.cc:96
bool is_uintn(int64_t x, unsigned n)
Definition: utils.h:1108
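is_intn() and is_uintn() test whether a 64-bit value fits in n bits as a signed, respectively unsigned, quantity; assemblers use such checks to validate immediates before encoding them. A sketch of the usual bounds-based formulation (assumes 0 < n < 64; assert policy and exact style are assumptions):

#include <stdint.h>

bool IsIntNSketch(int64_t x, unsigned n) {
  // Signed n-bit range is [-2^(n-1), 2^(n-1) - 1].
  int64_t limit = static_cast<int64_t>(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

bool IsUintNSketch(int64_t x, unsigned n) {
  // Unsigned n-bit range is [0, 2^n - 1]: no bit at position n or above
  // may be set. Negative inputs keep their sign bits high under an
  // arithmetic shift and are therefore rejected.
  return (x >> n) == 0;
}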
static Deoptimizer * New(JSFunction *function, BailoutType type, unsigned bailout_id, Address from, int fp_to_sp_delta, Isolate *isolate)
Definition: deoptimizer.cc:105
const int kEmbeddedObjectTag
Definition: assembler.cc:325
const int kVariableLengthPCJumpTopTag
Definition: assembler.cc:336