v8 3.25.30 (node 0.11.13)
V8 is Google's open source JavaScript engine
lithium.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"
#include "lithium.h"
#include "scopes.h"

#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/lithium-x64.h"
#include "x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
#include "arm/lithium-codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
#elif V8_TARGET_ARCH_ARM64
#include "arm64/lithium-arm64.h"
#include "arm64/lithium-codegen-arm64.h"
#else
#error "Unknown architecture."
#endif

namespace v8 {
namespace internal {


void LOperand::PrintTo(StringStream* stream) {
  LUnallocated* unalloc = NULL;
  switch (kind()) {
    case INVALID:
      stream->Add("(0)");
      break;
    case UNALLOCATED:
      unalloc = LUnallocated::cast(this);
      stream->Add("v%d", unalloc->virtual_register());
      if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
        stream->Add("(=%dS)", unalloc->fixed_slot_index());
        break;
      }
      switch (unalloc->extended_policy()) {
        case LUnallocated::NONE:
          break;
        case LUnallocated::FIXED_REGISTER: {
          int reg_index = unalloc->fixed_register_index();
          const char* register_name =
              Register::AllocationIndexToString(reg_index);
          stream->Add("(=%s)", register_name);
          break;
        }
        case LUnallocated::FIXED_DOUBLE_REGISTER: {
          int reg_index = unalloc->fixed_register_index();
          const char* double_register_name =
              DoubleRegister::AllocationIndexToString(reg_index);
          stream->Add("(=%s)", double_register_name);
          break;
        }
        case LUnallocated::MUST_HAVE_REGISTER:
          stream->Add("(R)");
          break;
        case LUnallocated::WRITABLE_REGISTER:
          stream->Add("(WR)");
          break;
        case LUnallocated::SAME_AS_FIRST_INPUT:
          stream->Add("(1)");
          break;
        case LUnallocated::ANY:
          stream->Add("(-)");
          break;
      }
      break;
    case CONSTANT_OPERAND:
      stream->Add("[constant:%d]", index());
      break;
    case STACK_SLOT:
      stream->Add("[stack:%d]", index());
      break;
    case DOUBLE_STACK_SLOT:
      stream->Add("[double_stack:%d]", index());
      break;
    case REGISTER:
      stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
      break;
    case DOUBLE_REGISTER:
      stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
      break;
  }
}
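
// A few sample renderings of the format above (illustrative; concrete
// register names such as "eax" assume an ia32 target):
//
//   v7(=eax)       UNALLOCATED, fixed to a specific register
//   v12(=3S)       UNALLOCATED, fixed to spill slot 3
//   v3(R)          UNALLOCATED, MUST_HAVE_REGISTER
//   [constant:4]   CONSTANT_OPERAND with index 4
//   [stack:1]      STACK_SLOT 1
//   [eax|R]        allocated REGISTER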


template<LOperand::Kind kOperandKind, int kNumCachedOperands>
LSubKindOperand<kOperandKind, kNumCachedOperands>*
LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;


template<LOperand::Kind kOperandKind, int kNumCachedOperands>
void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
  if (cache) return;
  cache = new LSubKindOperand[kNumCachedOperands];
  for (int i = 0; i < kNumCachedOperands; i++) {
    cache[i].ConvertTo(kOperandKind, i);
  }
}


template<LOperand::Kind kOperandKind, int kNumCachedOperands>
void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
  delete[] cache;
}


void LOperand::SetUpCaches() {
#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
#undef LITHIUM_OPERAND_SETUP
}


void LOperand::TearDownCaches() {
#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
#undef LITHIUM_OPERAND_TEARDOWN
}
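
// For illustration: LITHIUM_OPERAND_LIST (defined in lithium.h) applies its
// macro argument to each cached operand kind, so the SetUpCaches() body above
// expands to roughly
//
//   LConstantOperand::SetUpCache();
//   LStackSlot::SetUpCache();
//   LDoubleStackSlot::SetUpCache();
//   LRegister::SetUpCache();
//   LDoubleRegister::SetUpCache();
//
// and TearDownCaches() to the matching TearDownCache() calls.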


bool LParallelMove::IsRedundant() const {
  for (int i = 0; i < move_operands_.length(); ++i) {
    if (!move_operands_[i].IsRedundant()) return false;
  }
  return true;
}


void LParallelMove::PrintDataTo(StringStream* stream) const {
  bool first = true;
  for (int i = 0; i < move_operands_.length(); ++i) {
    if (!move_operands_[i].IsEliminated()) {
      LOperand* source = move_operands_[i].source();
      LOperand* destination = move_operands_[i].destination();
      if (!first) stream->Add(" ");
      first = false;
      if (source->Equals(destination)) {
        destination->PrintTo(stream);
      } else {
        destination->PrintTo(stream);
        stream->Add(" = ");
        source->PrintTo(stream);
      }
      stream->Add(";");
    }
  }
}
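
// Sample PrintDataTo() output for three pending moves, the last of which has
// destination equal to source (register names assume an ia32 target):
//
//   [eax|R] = [stack:3]; [stack:1] = v8(R); [ecx|R];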


void LEnvironment::PrintTo(StringStream* stream) {
  stream->Add("[id=%d|", ast_id().ToInt());
  if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
    stream->Add("deopt_id=%d|", deoptimization_index());
  }
  stream->Add("parameters=%d|", parameter_count());
  stream->Add("arguments_stack_height=%d|", arguments_stack_height());
  for (int i = 0; i < values_.length(); ++i) {
    if (i != 0) stream->Add(";");
    if (values_[i] == NULL) {
      stream->Add("[hole]");
    } else {
      values_[i]->PrintTo(stream);
    }
  }
  stream->Add("]");
}


void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  pointer_operands_.Add(op, zone);
}


void LPointerMap::RemovePointer(LOperand* op) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  for (int i = 0; i < pointer_operands_.length(); ++i) {
    if (pointer_operands_[i]->Equals(op)) {
      pointer_operands_.Remove(i);
      --i;
    }
  }
}


void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
  // Do not record arguments as pointers.
  if (op->IsStackSlot() && op->index() < 0) return;
  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
  untagged_operands_.Add(op, zone);
}


void LPointerMap::PrintTo(StringStream* stream) {
  stream->Add("{");
  for (int i = 0; i < pointer_operands_.length(); ++i) {
    if (i != 0) stream->Add(";");
    pointer_operands_[i]->PrintTo(stream);
  }
  stream->Add("}");
}


int StackSlotOffset(int index) {
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return -(index + 1) * kPointerSize -
        StandardFrameConstants::kFixedFrameSizeFromFp;
  } else {
    // Incoming parameter. Skip the return address.
    return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
  }
}
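
// Worked example, assuming ia32-like constants (kPointerSize == 4,
// kFPOnStackSize == kPCOnStackSize == 4, kFixedFrameSizeFromFp == 8):
//
//   StackSlotOffset(0)  == -(0 + 1) * 4 - 8       == -12  (first spill slot)
//   StackSlotOffset(1)  == -(1 + 1) * 4 - 8       == -16  (second spill slot)
//   StackSlotOffset(-1) == -(-1 + 1) * 4 + 4 + 4  ==   8  (incoming parameter)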


LChunk::LChunk(CompilationInfo* info, HGraph* graph)
    : spill_slot_count_(0),
      info_(info),
      graph_(graph),
      instructions_(32, graph->zone()),
      pointer_maps_(8, graph->zone()),
      inlined_closures_(1, graph->zone()) {
}


LLabel* LChunk::GetLabel(int block_id) const {
  HBasicBlock* block = graph_->blocks()->at(block_id);
  int first_instruction = block->first_instruction_index();
  return LLabel::cast(instructions_[first_instruction]);
}


int LChunk::LookupDestination(int block_id) const {
  LLabel* cur = GetLabel(block_id);
  while (cur->replacement() != NULL) {
    cur = cur->replacement();
  }
  return cur->block_id();
}
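
// Illustration: if MarkEmptyBlocks() below replaces block 4's label with
// block 7's, and block 7's with block 9's, LookupDestination(4) follows the
// replacement chain 4 -> 7 -> 9 and returns 9.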

Label* LChunk::GetAssemblyLabel(int block_id) const {
  LLabel* label = GetLabel(block_id);
  ASSERT(!label->HasReplacement());
  return label->label();
}


void LChunk::MarkEmptyBlocks() {
  LPhase phase("L_Mark empty blocks", this);
  for (int i = 0; i < graph()->blocks()->length(); ++i) {
    HBasicBlock* block = graph()->blocks()->at(i);
    int first = block->first_instruction_index();
    int last = block->last_instruction_index();
    LInstruction* first_instr = instructions()->at(first);
    LInstruction* last_instr = instructions()->at(last);

    LLabel* label = LLabel::cast(first_instr);
    if (last_instr->IsGoto()) {
      LGoto* goto_instr = LGoto::cast(last_instr);
      if (label->IsRedundant() &&
          !label->is_loop_header()) {
        bool can_eliminate = true;
        for (int i = first + 1; i < last && can_eliminate; ++i) {
          LInstruction* cur = instructions()->at(i);
          if (cur->IsGap()) {
            LGap* gap = LGap::cast(cur);
            if (!gap->IsRedundant()) {
              can_eliminate = false;
            }
          } else {
            can_eliminate = false;
          }
        }
        if (can_eliminate) {
          label->set_replacement(GetLabel(goto_instr->block_id()));
        }
      }
    }
  }
}


void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
  gap->set_hydrogen_value(instr->hydrogen_value());
  int index = -1;
  if (instr->IsControl()) {
    instructions_.Add(gap, zone());
    index = instructions_.length();
    instructions_.Add(instr, zone());
  } else {
    index = instructions_.length();
    instructions_.Add(instr, zone());
    instructions_.Add(gap, zone());
  }
  if (instr->HasPointerMap()) {
    pointer_maps_.Add(instr->pointer_map(), zone());
    instr->pointer_map()->set_lithium_position(index);
  }
}
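
// The chunk thus alternates instructions and gaps: a control instruction is
// preceded by its gap, any other instruction is followed by one, and index
// always records the instruction's own position so its pointer map can be
// tied to it, e.g. (positions illustrative):
//
//   ... [6] LAddI  [7] LGap  [8] LGap  [9] LGoto ...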


LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
  return LConstantOperand::Create(constant->id(), zone());
}


int LChunk::GetParameterStackSlot(int index) const {
  // The receiver is at index 0, the first parameter at index 1, so we
  // shift all parameter indexes down by the number of parameters, and
  // make sure they end up negative so they are distinguishable from
  // spill slots.
  int result = index - info()->num_parameters() - 1;

  ASSERT(result < 0);
  return result;
}
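
// Worked example: with num_parameters() == 2, the receiver (index 0) maps to
// slot 0 - 2 - 1 == -3, the first parameter to -2 and the second to -1; all
// results are negative, so they can never collide with spill slots, which
// use indices >= 0.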


// A parameter relative to ebp in the arguments stub.
int LChunk::ParameterAt(int index) {
  ASSERT(-1 <= index);  // -1 is the receiver.
  return (1 + info()->scope()->num_parameters() - index) *
      kPointerSize;
}
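
// Worked example, assuming kPointerSize == 4 and two declared parameters:
// ParameterAt(-1), the receiver, yields (1 + 2 + 1) * 4 == 16, while
// ParameterAt(1) yields (1 + 2 - 1) * 4 == 8; higher indexes sit closer
// to the frame pointer.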


LGap* LChunk::GetGapAt(int index) const {
  return LGap::cast(instructions_[index]);
}


bool LChunk::IsGapAt(int index) const {
  return instructions_[index]->IsGap();
}


int LChunk::NearestGapPos(int index) const {
  while (!IsGapAt(index)) index--;
  return index;
}


void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
  GetGapAt(index)->GetOrCreateParallelMove(
      LGap::START, zone())->AddMove(from, to, zone());
}
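
// Minimal usage sketch (hypothetical values): to schedule the move to = from
// in the gap closest to instruction position pos, a caller could write
//
//   chunk->AddGapMove(chunk->NearestGapPos(pos), from, to);
//
// which appends the move to that gap's START parallel move.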


HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
  return HConstant::cast(graph_->LookupValue(operand->index()));
}


Representation LChunk::LookupLiteralRepresentation(
    LConstantOperand* operand) const {
  return graph_->LookupValue(operand->index())->representation();
}


LChunk* LChunk::NewChunk(HGraph* graph) {
  DisallowHandleAllocation no_handles;
  DisallowHeapAllocation no_gc;
  graph->DisallowAddingNewValues();
  int values = graph->GetMaximumValueID();
  CompilationInfo* info = graph->info();
  if (values > LUnallocated::kMaxVirtualRegisters) {
    info->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
    return NULL;
  }
  LAllocator allocator(values, graph);
  LChunkBuilder builder(info, graph, &allocator);
  LChunk* chunk = builder.Build();
  if (chunk == NULL) return NULL;

  if (!allocator.Allocate(chunk)) {
    info->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
    return NULL;
  }

  chunk->set_allocated_double_registers(
      allocator.assigned_double_registers());

  return chunk;
}


Handle<Code> LChunk::Codegen() {
  MacroAssembler assembler(info()->isolate(), NULL, 0);
  LOG_CODE_EVENT(info()->isolate(),
                 CodeStartLinePosInfoRecordEvent(
                     assembler.positions_recorder()));
  LCodeGen generator(this, &assembler, info());

  MarkEmptyBlocks();

  if (generator.GenerateCode()) {
    CodeGenerator::MakeCodePrologue(info(), "optimized");
    Code::Flags flags = info()->flags();
    Handle<Code> code =
        CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
    generator.FinishCode(code);
    code->set_is_crankshafted(true);
    void* jit_handler_data =
        assembler.positions_recorder()->DetachJITHandlerData();
    LOG_CODE_EVENT(info()->isolate(),
                   CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));

    CodeGenerator::PrintCode(code, info());
    return code;
  }
  assembler.AbortedCodeGeneration();
  return Handle<Code>::null();
}


void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
  allocated_double_registers_ = allocated_registers;
  BitVector* doubles = allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  while (!iterator.Done()) {
    if (info()->saves_caller_doubles()) {
      if (kDoubleSize == kPointerSize * 2) {
        spill_slot_count_ += 2;
      } else {
        spill_slot_count_++;
      }
    }
    iterator.Advance();
  }
}


LEnvironment* LChunkBuilderBase::CreateEnvironment(
    HEnvironment* hydrogen_env,
    int* argument_index_accumulator,
    ZoneList<HValue*>* objects_to_materialize) {
  if (hydrogen_env == NULL) return NULL;

  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
                                          argument_index_accumulator,
                                          objects_to_materialize);
  BailoutId ast_id = hydrogen_env->ast_id();
  ASSERT(!ast_id.IsNone() ||
         hydrogen_env->frame_type() != JS_FUNCTION);
  int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
  LEnvironment* result =
      new(zone()) LEnvironment(hydrogen_env->closure(),
                               hydrogen_env->frame_type(),
                               ast_id,
                               hydrogen_env->parameter_count(),
                               argument_count_,
                               value_count,
                               outer,
                               hydrogen_env->entry(),
                               zone());
  int argument_index = *argument_index_accumulator;

  // Store the environment description into the environment
  // (with holes for nested objects)
  for (int i = 0; i < hydrogen_env->length(); ++i) {
    if (hydrogen_env->is_special_index(i)) continue;

    LOperand* op;
    HValue* value = hydrogen_env->values()->at(i);
    CHECK(!value->IsPushArgument());  // Do not deopt outgoing arguments
    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
      op = LEnvironment::materialization_marker();
    } else {
      op = UseAny(value);
    }
    result->AddValue(op,
                     value->representation(),
                     value->CheckFlag(HInstruction::kUint32));
  }

  // Recursively store the nested objects into the environment
  for (int i = 0; i < hydrogen_env->length(); ++i) {
    if (hydrogen_env->is_special_index(i)) continue;

    HValue* value = hydrogen_env->values()->at(i);
    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
      AddObjectToMaterialize(value, objects_to_materialize, result);
    }
  }

  if (hydrogen_env->frame_type() == JS_FUNCTION) {
    *argument_index_accumulator = argument_index;
  }

  return result;
}


// Add an object to the supplied environment and object materialization list.
//
// Notes:
//
// We are building three lists here:
//
// 1. In the result->object_mapping_ list (added to by the
//    LEnvironment::Add*Object methods), we store the lengths (number
//    of fields) of the captured objects in depth-first traversal order, or
//    in case of duplicated objects, we store the index to the duplicate object
//    (with a tag to differentiate between captured and duplicated objects).
//
// 2. The object fields are stored in the result->values_ list
//    (added to by the LEnvironment.AddValue method) sequentially as lists
//    of fields with holes for nested objects (the holes will be expanded
//    later by LCodegen::AddToTranslation according to the
//    LEnvironment.object_mapping_ list).
//
// 3. The auxiliary objects_to_materialize array stores the hydrogen values
//    in the same order as result->object_mapping_ list. This is used
//    to detect duplicate values and calculate the corresponding object index.
void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
    ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
  int object_index = objects_to_materialize->length();
  // Store the hydrogen value into the de-duplication array
  objects_to_materialize->Add(value, zone());
  // Find out whether we are storing a duplicated value
  int previously_materialized_object = -1;
  for (int prev = 0; prev < object_index; ++prev) {
    if (objects_to_materialize->at(prev) == value) {
      previously_materialized_object = prev;
      break;
    }
  }
  // Store the captured object length (or duplicated object index)
  // into the environment. For duplicated objects, we stop here.
  int length = value->OperandCount();
  bool is_arguments = value->IsArgumentsObject();
  if (previously_materialized_object >= 0) {
    result->AddDuplicateObject(previously_materialized_object);
    return;
  } else {
    result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
  }
  // Store the captured object's fields into the environment
  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
    LOperand* op;
    HValue* arg_value = value->OperandAt(i);
    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
      // Insert a hole for nested objects
      op = LEnvironment::materialization_marker();
    } else {
      ASSERT(!arg_value->IsPushArgument());
      // For ordinary values, tell the register allocator we need the value
      // to be alive here
      op = UseAny(arg_value);
    }
    result->AddValue(op,
                     arg_value->representation(),
                     arg_value->CheckFlag(HInstruction::kUint32));
  }
  // Recursively store all the nested captured objects into the environment
  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
    HValue* arg_value = value->OperandAt(i);
    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
      AddObjectToMaterialize(arg_value, objects_to_materialize, result);
    }
  }
}
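
// Worked example (schematic): materializing a captured object A with two
// fields, the second being a captured object B with one field, and later A
// again as a duplicate, yields
//
//   objects_to_materialize:   [A, B, A]
//   result->object_mapping_:  [new(length 2), new(length 1), duplicate(0)]
//   result->values_:          [A.field0, hole, B.field0]
//
// where the hole marks B's position inside A, to be expanded later by
// LCodegen::AddToTranslation.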


LInstruction* LChunkBuilder::CheckElideControlInstruction(
    HControlInstruction* instr) {
  HBasicBlock* successor;
  if (!instr->KnownSuccessorBlock(&successor)) return NULL;
  return new(zone()) LGoto(successor);
}


LPhase::~LPhase() {
  if (ShouldProduceTraceOutput()) {
    isolate()->GetHTracer()->TraceLithium(name(), chunk_);
  }
}


} }  // namespace v8::internal
Definition: lithium.h:644