Flutter Engine
The Flutter Engine
il_test.cc
Go to the documentation of this file.
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6
7#include <optional>
8#include <vector>
9
11#include "platform/utils.h"
12#include "vm/class_id.h"
20#include "vm/unit_test.h"
21
22namespace dart {
23
24ISOLATE_UNIT_TEST_CASE(InstructionTests) {
25 TargetEntryInstr* target_instr =
27 EXPECT(target_instr->IsBlockEntry());
28 EXPECT(!target_instr->IsDefinition());
29}
30
31ISOLATE_UNIT_TEST_CASE(OptimizationTests) {
34
35 Definition* def1 = new PhiInstr(join, 0);
36 Definition* def2 = new PhiInstr(join, 0);
37 Value* use1a = new Value(def1);
38 Value* use1b = new Value(def1);
39 EXPECT(use1a->Equals(*use1b));
40 Value* use2 = new Value(def2);
41 EXPECT(!use2->Equals(*use1a));
42
45 EXPECT(c1->Equals(*c2));
48 EXPECT(c3->Equals(*c4));
49 EXPECT(!c3->Equals(*c1));
50}
51
52ISOLATE_UNIT_TEST_CASE(IRTest_EliminateWriteBarrier) {
53 // clang-format off
54 const char* kScript = R"(
55 class Container<T> {
56 operator []=(var index, var value) {
57 return data[index] = value;
58 }
59
60 List<T?> data = List<T?>.filled(10, null);
61 }
62
63 Container<int> x = Container<int>();
64
65 foo() {
66 for (int i = 0; i < 10; ++i) {
67 x[i] = i;
68 }
69 }
70 )";
71 // clang-format on
72
73 const auto& root_library = Library::Handle(LoadTestScript(kScript));
74 const auto& function = Function::Handle(GetFunction(root_library, "foo"));
75
76 Invoke(root_library, "foo");
77
79 FlowGraph* flow_graph = pipeline.RunPasses({});
80
81 auto entry = flow_graph->graph_entry()->normal_entry();
82 EXPECT(entry != nullptr);
83
84 StoreIndexedInstr* store_indexed = nullptr;
85
86 ILMatcher cursor(flow_graph, entry, true);
88 kMoveGlob,
89 kMatchAndMoveBranchTrue,
90 kMoveGlob,
91 {kMatchStoreIndexed, &store_indexed},
92 }));
93
94 EXPECT(!store_indexed->value()->NeedsWriteBarrier());
95}
96
97static void ExpectStores(FlowGraph* flow_graph,
98 const std::vector<const char*>& expected_stores) {
99 size_t next_expected_store = 0;
100 for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
101 !block_it.Done(); block_it.Advance()) {
102 for (ForwardInstructionIterator it(block_it.Current()); !it.Done();
103 it.Advance()) {
104 if (auto store = it.Current()->AsStoreField()) {
105 EXPECT_LT(next_expected_store, expected_stores.size());
106 EXPECT_STREQ(expected_stores[next_expected_store],
107 store->slot().Name());
108 next_expected_store++;
109 }
110 }
111 }
112}
113
115 const Library& root_library,
116 const char* function_name,
118 const std::vector<const char*>& expected_stores) {
119 const auto& function =
120 Function::Handle(GetFunction(root_library, function_name));
121 TestPipeline pipeline(function, mode);
122 FlowGraph* flow_graph = pipeline.RunPasses({
123 CompilerPass::kComputeSSA,
124 CompilerPass::kTypePropagation,
125 CompilerPass::kApplyICData,
126 CompilerPass::kInlining,
127 CompilerPass::kTypePropagation,
128 CompilerPass::kSelectRepresentations,
129 CompilerPass::kCanonicalize,
130 CompilerPass::kConstantPropagation,
131 });
132 ASSERT(flow_graph != nullptr);
133 ExpectStores(flow_graph, expected_stores);
134}
135
136ISOLATE_UNIT_TEST_CASE(IRTest_InitializingStores) {
137 // clang-format off
138 const char* kScript = R"(
139 class Bar {
140 var f;
141 var g;
142
143 Bar({this.f, this.g});
144 }
145 Bar f1() => Bar(f: 10);
146 Bar f2() => Bar(g: 10);
147 f3() {
148 return () { };
149 }
150 f4<T>({T? value}) {
151 return () { return value; };
152 }
153 main() {
154 f1();
155 f2();
156 f3();
157 f4();
158 }
159 )";
160 // clang-format on
161
162 const auto& root_library = Library::Handle(LoadTestScript(kScript));
163 Invoke(root_library, "main");
164
165 RunInitializingStoresTest(root_library, "f1", CompilerPass::kJIT,
166 /*expected_stores=*/{"f"});
167 RunInitializingStoresTest(root_library, "f2", CompilerPass::kJIT,
168 /*expected_stores=*/{"g"});
169 RunInitializingStoresTest(root_library, "f3", CompilerPass::kJIT,
170 /*expected_stores=*/
171 {"Closure.function", "Closure.entry_point"});
172
173 // Note that in JIT mode we lower context allocation in a way that hinders
174 // removal of initializing moves so there would be some redundant stores of
175 // null left in the graph. In AOT mode we don't apply this optimization
176 // which enables us to remove more stores.
177 std::vector<const char*> expected_stores_jit;
178 std::vector<const char*> expected_stores_aot;
179
180 expected_stores_jit.insert(
181 expected_stores_jit.end(),
182 {"value", "Context.parent", "Context.parent", "value",
183 "Closure.function_type_arguments", "Closure.context"});
184 expected_stores_aot.insert(
185 expected_stores_aot.end(),
186 {"value", "Closure.function_type_arguments", "Closure.context"});
187
188 RunInitializingStoresTest(root_library, "f4", CompilerPass::kJIT,
189 expected_stores_jit);
190 RunInitializingStoresTest(root_library, "f4", CompilerPass::kAOT,
191 expected_stores_aot);
192}
193
194// Returns |true| if compiler canonicalizes away a chain of IntConverters going
195// from |initial| representation to |intermediate| representation and then
196// back to |initial| given that initial value has range [min_value, max_value].
198 int64_t min_value,
199 int64_t max_value,
200 Representation initial,
201 Representation intermediate,
202 Representation final) {
  // NOTE(review): the function-name line of this helper and a line after the
  // opening brace (presumably `using compiler::BlockBuilder;`, since
  // BlockBuilder is used unqualified below) were lost in extraction.
204
205 CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
206
207 FlowGraphBuilderHelper H(/*num_parameters=*/1);
208 H.AddVariable("v0", AbstractType::ZoneHandle(Type::IntType()));
209
210 auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
211
212 Definition* v0;
213 DartReturnInstr* ret;
214
215 {
  // Build: v0 (range-annotated) -> conv1 (initial->intermediate)
  //        -> conv2 (intermediate->initial) -> return.
216 BlockBuilder builder(H.flow_graph(), normal_entry);
217 v0 = builder.AddParameter(0, initial);
218 v0->set_range(Range(RangeBoundary::FromConstant(min_value),
219 RangeBoundary::FromConstant(max_value)));
220 auto conv1 = builder.AddDefinition(new IntConverterInstr(
221 initial, intermediate, new Value(v0), S.GetNextDeoptId()));
222 auto conv2 = builder.AddDefinition(new IntConverterInstr(
223 intermediate, initial, new Value(conv1), S.GetNextDeoptId()));
224 ret = builder.AddReturn(new Value(conv2));
225 }
226
227 H.FinishGraph();
228
  // Canonicalize twice so simplifications enabled by the first pass can fire.
229 H.flow_graph()->Canonicalize();
230 H.flow_graph()->Canonicalize();
231
  // The chain folded away iff the return now consumes v0 directly.
232 return ret->value()->definition() == v0;
233}
234
235ISOLATE_UNIT_TEST_CASE(IL_IntConverterCanonicalization) {
  // Each check feeds a range-annotated value through an IntConverter chain
  // (see the helper above) and verifies whether the chain folds away: it
  // should fold exactly when the value's range fits the intermediate
  // representation.
  // NOTE(review): the leading EXPECT(...) line of each call was lost in
  // extraction; only trailing argument fragments remain below — restore from
  // upstream before compiling.
237 kUnboxedInt64, kUnboxedInt32,
238 kUnboxedInt64));
240 kUnboxedInt64, kUnboxedInt32,
241 kUnboxedInt64));
243 thread, kMinInt32, static_cast<int64_t>(kMaxInt32) + 1, kUnboxedInt64,
244 kUnboxedInt32, kUnboxedInt64));
246 thread, 0, kMaxInt16, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));
248 thread, 0, kMaxInt32, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));
250 thread, 0, kMaxUint32, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));
252 thread, 0, static_cast<int64_t>(kMaxUint32) + 1, kUnboxedInt64,
253 kUnboxedUint32, kUnboxedInt64));
255 thread, -1, kMaxInt16, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));
256
257 // Regression test for https://dartbug.com/53613.
259 kUnboxedInt32, kUnboxedUint32,
260 kUnboxedInt64));
262 kUnboxedInt32, kUnboxedUint32,
263 kUnboxedInt32));
265 thread, 0, kMaxInt32, kUnboxedInt32, kUnboxedUint32, kUnboxedInt64));
267 thread, 0, kMaxInt32, kUnboxedInt32, kUnboxedUint32, kUnboxedInt32));
268}
269
270ISOLATE_UNIT_TEST_CASE(IL_PhiCanonicalization) {
272
273 CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
274
275 FlowGraphBuilderHelper H(/*num_parameters=*/1);
276 H.AddVariable("v0", AbstractType::ZoneHandle(Type::DynamicType()));
277
278 auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
279 auto b2 = H.JoinEntry();
280 auto b3 = H.TargetEntry();
281 auto b4 = H.TargetEntry();
282
283 Definition* v0;
284 DartReturnInstr* ret;
285 PhiInstr* phi;
286
287 {
288 BlockBuilder builder(H.flow_graph(), normal_entry);
289 v0 = builder.AddParameter(0, kTagged);
290 builder.AddInstruction(new GotoInstr(b2, S.GetNextDeoptId()));
291 }
292
293 {
294 BlockBuilder builder(H.flow_graph(), b2);
295 phi = new PhiInstr(b2, 2);
296 phi->SetInputAt(0, new Value(v0));
297 phi->SetInputAt(1, new Value(phi));
298 builder.AddPhi(phi);
299 builder.AddBranch(new StrictCompareInstr(
300 InstructionSource(), Token::kEQ_STRICT,
301 new Value(H.IntConstant(1)), new Value(phi),
302 /*needs_number_check=*/false, S.GetNextDeoptId()),
303 b3, b4);
304 }
305
306 {
307 BlockBuilder builder(H.flow_graph(), b3);
308 builder.AddInstruction(new GotoInstr(b2, S.GetNextDeoptId()));
309 }
310
311 {
312 BlockBuilder builder(H.flow_graph(), b4);
313 ret = builder.AddReturn(new Value(phi));
314 }
315
316 H.FinishGraph();
317
318 H.flow_graph()->Canonicalize();
319
320 EXPECT(ret->value()->definition() == v0);
321}
322
323// Regression test for issue 46018.
324ISOLATE_UNIT_TEST_CASE(IL_UnboxIntegerCanonicalization) {
  // NOTE(review): a line after the opening brace (presumably
  // `using compiler::BlockBuilder;`) and one StoreIndexedInstr argument line
  // were lost in extraction.
326
327 CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
328
329 FlowGraphBuilderHelper H(/*num_parameters=*/2);
330 H.AddVariable("v0", AbstractType::ZoneHandle(Type::DynamicType()));
331 H.AddVariable("v1", AbstractType::ZoneHandle(Type::DynamicType()));
332
333 auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
334 Definition* unbox;
335
336 {
  // Build: load a double element, box it, assert-assign it to `int`, then
  // unbox the (impossible) result as int64 and store it into an int64 array.
337 BlockBuilder builder(H.flow_graph(), normal_entry);
338 Definition* index = H.IntConstant(0);
339 Definition* int_type =
340 H.flow_graph()->GetConstant(Type::Handle(Type::IntType()));
341
342 Definition* float64_array = builder.AddParameter(0, kTagged);
343 Definition* int64_array = builder.AddParameter(1, kTagged);
344
345 Definition* load_indexed = builder.AddDefinition(new LoadIndexedInstr(
346 new Value(float64_array), new Value(index),
347 /* index_unboxed */ false,
348 /* index_scale */ 8, kTypedDataFloat64ArrayCid, kAlignedAccess,
349 S.GetNextDeoptId(), InstructionSource()));
350 Definition* box = builder.AddDefinition(
351 BoxInstr::Create(kUnboxedDouble, new Value(load_indexed)));
352 Definition* cast = builder.AddDefinition(new AssertAssignableInstr(
353 InstructionSource(), new Value(box), new Value(int_type),
354 /* instantiator_type_arguments */
355 new Value(H.flow_graph()->constant_null()),
356 /* function_type_arguments */
357 new Value(H.flow_graph()->constant_null()),
358 /* dst_name */ String::Handle(String::New("not-null")),
359 S.GetNextDeoptId()));
360 unbox = builder.AddDefinition(new UnboxInt64Instr(
361 new Value(cast), S.GetNextDeoptId(), BoxInstr::kGuardInputs));
362
363 builder.AddInstruction(new StoreIndexedInstr(
364 new Value(int64_array), new Value(index), new Value(unbox),
366 /* index_unboxed */ false,
367 /* index_scale */ 8, kTypedDataInt64ArrayCid, kAlignedAccess,
368 S.GetNextDeoptId(), InstructionSource()));
369 builder.AddReturn(new Value(index));
370 }
371
372 H.FinishGraph();
373
  // The unbox must remain non-deoptimizing across type propagation,
  // canonicalization, and redefinition removal.
374 FlowGraphTypePropagator::Propagate(H.flow_graph());
375 EXPECT(!unbox->ComputeCanDeoptimize());
376
377 H.flow_graph()->Canonicalize();
378 EXPECT(!unbox->ComputeCanDeoptimize());
379
380 H.flow_graph()->RemoveRedefinitions();
381 EXPECT(!unbox->ComputeCanDeoptimize()); // Previously this reverted to true.
382}
383
384static void WriteCidTo(intptr_t cid, BaseTextBuffer* buffer) {
385 ClassTable* const class_table = IsolateGroup::Current()->class_table();
386 buffer->Printf("%" Pd "", cid);
387 if (class_table->HasValidClassAt(cid)) {
388 const auto& cls = Class::Handle(class_table->At(cid));
389 buffer->Printf(" (%s", cls.ScrubbedNameCString());
390 if (cls.is_abstract()) {
391 buffer->AddString(", abstract");
392 }
393 buffer->AddString(")");
394 }
395}
396
398 Thread* thread,
399 bool allow_representation_change) {
  // NOTE(review): the signature head of this helper and a line after the
  // opening brace (presumably `using compiler::BlockBuilder;`) were lost in
  // extraction.
401
402 CompilerState S(thread, /*is_aot=*/true, /*is_optimizing=*/true);
403
404 FlowGraphBuilderHelper H(/*num_parameters=*/2);
405 H.AddVariable("v0", AbstractType::ZoneHandle(Type::IntType()));
406 H.AddVariable("v1", AbstractType::ZoneHandle(Type::IntType()));
407
408 auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
409
410 EqualityCompareInstr* compare = nullptr;
411 {
  // Build: box two unboxed int64 parameters and compare them with a
  // null-aware equality compare.
412 BlockBuilder builder(H.flow_graph(), normal_entry);
413 Definition* v0 = builder.AddParameter(0, kUnboxedInt64);
414 Definition* v1 = builder.AddParameter(1, kUnboxedInt64);
415 Definition* box0 = builder.AddDefinition(new BoxInt64Instr(new Value(v0)));
416 Definition* box1 = builder.AddDefinition(new BoxInt64Instr(new Value(v1)));
417
418 compare = builder.AddDefinition(new EqualityCompareInstr(
419 InstructionSource(), Token::kEQ, new Value(box0), new Value(box1),
420 kMintCid, S.GetNextDeoptId(), /*null_aware=*/true));
421 builder.AddReturn(new Value(compare));
422 }
423
424 H.FinishGraph();
425
426 if (!allow_representation_change) {
427 H.flow_graph()->disallow_unmatched_representations();
428 }
429
430 H.flow_graph()->Canonicalize();
431
  // Canonicalization strips the null-awareness only when it is allowed to
  // change the operands' representations (i.e. drop the boxing).
432 EXPECT(compare->is_null_aware() == !allow_representation_change);
433}
434
435ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_EqualityCompare) {
  // Exercises the null-aware equality-compare canonicalization helper above.
  // NOTE(review): the helper invocations in this test body were lost in
  // extraction — restore from upstream (one call per value of
  // allow_representation_change).
438}
439
// Appends a human-readable dump of |ranges| to the buffer, one cid per line.
440static void WriteCidRangeVectorTo(const CidRangeVector& ranges,
442 if (ranges.is_empty()) {
443 buffer->AddString("empty CidRangeVector");
444 return;
445 }
446 buffer->AddString("non-empty CidRangeVector:\n");
  // NOTE(review): the second signature line (the buffer parameter) and the
  // per-cid WriteCidTo(cid, buffer) call inside the loop were lost in
  // extraction.
447 for (const auto& range : ranges) {
448 for (intptr_t cid = range.cid_start; cid <= range.cid_end; cid++) {
449 buffer->AddString(" * ");
451 buffer->AddString("\n");
452 }
453 }
454}
455
456static bool ExpectRangesContainCid(const Expect& expect,
457 const CidRangeVector& ranges,
458 intptr_t expected) {
459 for (const auto& range : ranges) {
460 for (intptr_t cid = range.cid_start; cid <= range.cid_end; cid++) {
461 if (expected == cid) return true;
462 }
463 }
464 TextBuffer buffer(128);
465 buffer.AddString("Expected CidRangeVector to include cid ");
466 WriteCidTo(expected, &buffer);
467 expect.Fail("%s", buffer.buffer());
468 return false;
469}
470
// Checks that every cid in |expected| is contained in |ranges|, logging the
// checked ranges/cids and a summary line when all are found.
471static void RangesContainExpectedCids(const Expect& expect,
472 const CidRangeVector& ranges,
473 const GrowableArray<intptr_t>& expected) {
474 ASSERT(!ranges.is_empty());
475 ASSERT(!expected.is_empty());
476 {
  // Log what is being checked.
  // NOTE(review): the WriteCidRangeVectorTo(ranges, &buffer) and
  // WriteCidTo(cid, &buffer) calls in this logging section were lost in
  // extraction.
477 TextBuffer buffer(128);
478 buffer.AddString("Checking that ");
480 buffer.AddString("includes cids:\n");
481 for (const intptr_t cid : expected) {
482 buffer.AddString(" * ");
484 buffer.AddString("\n");
485 }
486 THR_Print("%s", buffer.buffer());
487 }
488 bool all_found = true;
489 for (const intptr_t cid : expected) {
490 if (!ExpectRangesContainCid(expect, ranges, cid)) {
491 all_found = false;
492 }
493 }
494 if (all_found) {
495 THR_Print("All expected cids included.\n\n");
496 }
497}
498
// Wrapper that captures the caller's __FILE__/__LINE__ in the Expect object,
// so failures point at the test line rather than at RangesContainExpectedCids.
499#define RANGES_CONTAIN_EXPECTED_CIDS(ranges, cids) \
500 RangesContainExpectedCids(dart::Expect(__FILE__, __LINE__), ranges, cids)
501
// Checks that HierarchyInfo's subtype ranges for Object cover every valid
// concrete (and, when requested, abstract) class id in the class table.
502ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Object_Subtype) {
503 HierarchyInfo hi(thread);
504 const auto& type =
505 Type::Handle(IsolateGroup::Current()->object_store()->object_type());
506 const bool is_nullable = Instance::NullIsAssignableTo(type);
  // NOTE(review): one line was lost in extraction here (listing line 507).
508 const auto& cls = Class::Handle(type.type_class());
509
510 ClassTable* const class_table = thread->isolate_group()->class_table();
511 const intptr_t num_cids = class_table->NumCids();
512 auto& to_check = Class::Handle(thread->zone());
513 auto& rare_type = AbstractType::Handle(thread->zone());
514
  // Collect every valid cid, split into concrete vs abstract, skipping the
  // special cids (Null/Never always; Dynamic/Void only when non-nullable).
515 GrowableArray<intptr_t> expected_concrete_cids;
516 GrowableArray<intptr_t> expected_abstract_cids;
517 for (intptr_t cid = kInstanceCid; cid < num_cids; cid++) {
518 if (!class_table->HasValidClassAt(cid)) continue;
519 if (cid == kNullCid) continue;
520 if (cid == kNeverCid) continue;
521 if (cid == kDynamicCid && !is_nullable) continue;
522 if (cid == kVoidCid && !is_nullable) continue;
523 to_check = class_table->At(cid);
524 // Only add concrete classes.
525 if (to_check.is_abstract()) {
526 expected_abstract_cids.Add(cid);
527 } else {
528 expected_concrete_cids.Add(cid);
529 }
530 if (cid != kTypeArgumentsCid) { // Cannot call RareType() on this.
531 rare_type = to_check.RareType();
532 EXPECT(rare_type.IsSubtypeOf(type, Heap::kNew));
533 }
534 }
535
536 const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
537 cls, /*include_abstract=*/false, /*exclude_null=*/!is_nullable);
538 RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);
539
540 GrowableArray<intptr_t> expected_cids;
541 expected_cids.AddArray(expected_concrete_cids);
542 expected_cids.AddArray(expected_abstract_cids);
543 const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
544 cls, /*include_abstract=*/true, /*exclude_null=*/!is_nullable);
545 RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
546}
547
// Checks HierarchyInfo subtype ranges for Function: Closure is the only
// concrete subtype; the Function class itself is the abstract one.
548ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Function_Subtype) {
549 HierarchyInfo hi(thread);
550 const auto& type =
551 Type::Handle(IsolateGroup::Current()->object_store()->function_type());
  // NOTE(review): one line was lost in extraction here (listing line 552).
553 const auto& cls = Class::Handle(type.type_class());
554
555 GrowableArray<intptr_t> expected_concrete_cids;
556 expected_concrete_cids.Add(kClosureCid);
557
558 GrowableArray<intptr_t> expected_abstract_cids;
559 expected_abstract_cids.Add(type.type_class_id());
560
561 const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
562 cls, /*include_abstract=*/false, /*exclude_null=*/true);
563 RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);
564
565 GrowableArray<intptr_t> expected_cids;
566 expected_cids.AddArray(expected_concrete_cids);
567 expected_cids.AddArray(expected_abstract_cids);
568 const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
569 cls, /*include_abstract=*/true, /*exclude_null=*/true);
570 RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
571}
572
// Checks HierarchyInfo subtype ranges for num: Smi/Mint/Double are the
// concrete subtypes; num, int and double themselves are abstract.
573ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Num_Subtype) {
574 HierarchyInfo hi(thread);
575 const auto& num_type = Type::Handle(Type::Number());
576 const auto& int_type = Type::Handle(Type::IntType());
577 const auto& double_type = Type::Handle(Type::Double());
  // NOTE(review): one line was lost in extraction here (listing line 578).
579 const auto& cls = Class::Handle(num_type.type_class());
580
581 GrowableArray<intptr_t> expected_concrete_cids;
582 expected_concrete_cids.Add(kSmiCid);
583 expected_concrete_cids.Add(kMintCid);
584 expected_concrete_cids.Add(kDoubleCid);
585
586 GrowableArray<intptr_t> expected_abstract_cids;
587 expected_abstract_cids.Add(num_type.type_class_id());
588 expected_abstract_cids.Add(int_type.type_class_id());
589 expected_abstract_cids.Add(double_type.type_class_id());
590
591 const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
592 cls, /*include_abstract=*/false, /*exclude_null=*/true);
593 RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);
594
595 GrowableArray<intptr_t> expected_cids;
596 expected_cids.AddArray(expected_concrete_cids);
597 expected_cids.AddArray(expected_abstract_cids);
598 const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
599 cls, /*include_abstract=*/true, /*exclude_null=*/true);
600 RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
601}
602
// Checks HierarchyInfo subtype ranges for int: Smi/Mint concrete, int itself
// abstract.
603ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Int_Subtype) {
604 HierarchyInfo hi(thread);
605 const auto& type = Type::Handle(Type::IntType());
  // NOTE(review): one line was lost in extraction here (listing line 606).
607 const auto& cls = Class::Handle(type.type_class());
608
609 GrowableArray<intptr_t> expected_concrete_cids;
610 expected_concrete_cids.Add(kSmiCid);
611 expected_concrete_cids.Add(kMintCid);
612
613 GrowableArray<intptr_t> expected_abstract_cids;
614 expected_abstract_cids.Add(type.type_class_id());
615
616 const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
617 cls, /*include_abstract=*/false, /*exclude_null=*/true);
618 RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);
619
620 GrowableArray<intptr_t> expected_cids;
621 expected_cids.AddArray(expected_concrete_cids);
622 expected_cids.AddArray(expected_abstract_cids);
623 const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
624 cls, /*include_abstract=*/true, /*exclude_null=*/true);
625 RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
626}
627
// Checks HierarchyInfo subtype ranges for String: one- and two-byte strings
// concrete, String itself abstract.
628ISOLATE_UNIT_TEST_CASE(HierarchyInfo_String_Subtype) {
629 HierarchyInfo hi(thread);
630 const auto& type = Type::Handle(Type::StringType());
  // NOTE(review): one line was lost in extraction here (listing line 631).
632 const auto& cls = Class::Handle(type.type_class());
633
634 GrowableArray<intptr_t> expected_concrete_cids;
635 expected_concrete_cids.Add(kOneByteStringCid);
636 expected_concrete_cids.Add(kTwoByteStringCid);
637
638 GrowableArray<intptr_t> expected_abstract_cids;
639 expected_abstract_cids.Add(type.type_class_id());
640
641 const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
642 cls, /*include_abstract=*/false, /*exclude_null=*/true);
643 THR_Print("Checking concrete subtype ranges for String\n");
644 RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);
645
646 GrowableArray<intptr_t> expected_cids;
647 expected_cids.AddArray(expected_concrete_cids);
648 expected_cids.AddArray(expected_abstract_cids);
649 const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
650 cls, /*include_abstract=*/true, /*exclude_null=*/true);
651 THR_Print("Checking concrete and abstract subtype ranges for String\n");
652 RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
653}
654
655// This test verifies that double == Smi is recognized and
656// implemented using EqualityCompare.
657// Regression test for https://github.com/dart-lang/sdk/issues/47031.
658ISOLATE_UNIT_TEST_CASE(IRTest_DoubleEqualsSmi) {
659 const char* kScript = R"(
660 bool foo(double x) => (x + 0.5) == 0;
661 main() {
662 foo(-0.5);
663 }
664 )";
665
666 const auto& root_library = Library::Handle(LoadTestScript(kScript));
667 const auto& function = Function::Handle(GetFunction(root_library, "foo"));
668
669 TestPipeline pipeline(function, CompilerPass::kAOT);
670 FlowGraph* flow_graph = pipeline.RunPasses({});
671
672 auto entry = flow_graph->graph_entry()->normal_entry();
673 ILMatcher cursor(flow_graph, entry, /*trace=*/true,
674 ParallelMovesHandling::kSkip);
675
676 RELEASE_ASSERT(cursor.TryMatch({
677 kMoveGlob,
678 kMatchAndMoveBinaryDoubleOp,
679 kMatchAndMoveEqualityCompare,
680 kMatchDartReturn,
681 }));
682}
683
// Checks LoadThreadInstr end-to-end: splice LoadThread -> IntConverter ->
// Box in front of the return of a trivial function, compile it, and verify
// the invocation returns the current Thread pointer as a boxed address.
684ISOLATE_UNIT_TEST_CASE(IRTest_LoadThread) {
685 // clang-format off
686 auto kScript = R"(
687 import 'dart:ffi';
688
689 int myFunction() {
690 return 100;
691 }
692
693 void anotherFunction() {}
694 )";
695 // clang-format on
696
697 const auto& root_library = Library::Handle(LoadTestScript(kScript));
698 Zone* const zone = Thread::Current()->zone();
699 auto& invoke_result = Instance::Handle(zone);
700 invoke_result ^= Invoke(root_library, "myFunction");
701 EXPECT_EQ(Smi::New(100), invoke_result.ptr());
702
703 const auto& my_function =
704 Function::Handle(GetFunction(root_library, "myFunction"));
705
706 TestPipeline pipeline(my_function, CompilerPass::kJIT);
707 FlowGraph* flow_graph = pipeline.RunPasses({
708 CompilerPass::kComputeSSA,
709 });
710
  // Locate the return so the new instructions can be inserted before it.
711 DartReturnInstr* return_instr = nullptr;
712 {
713 ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
714
715 EXPECT(cursor.TryMatch({
716 kMoveGlob,
717 {kMatchDartReturn, &return_instr},
718 }));
719 }
720
  // Insert LoadThread, convert its untagged result to an unboxed address,
  // box it, and make the return consume the boxed value.
721 auto* const load_thread_instr = new (zone) LoadThreadInstr();
722 flow_graph->InsertBefore(return_instr, load_thread_instr, nullptr,
723 FlowGraph::kValue);
724 auto load_thread_value = Value(load_thread_instr);
725
726 auto* const convert_instr = new (zone) IntConverterInstr(
727 kUntagged, kUnboxedAddress, &load_thread_value, DeoptId::kNone);
728 flow_graph->InsertBefore(return_instr, convert_instr, nullptr,
729 FlowGraph::kValue);
730 auto convert_value = Value(convert_instr);
731
732 auto* const box_instr = BoxInstr::Create(kUnboxedAddress, &convert_value);
733 flow_graph->InsertBefore(return_instr, box_instr, nullptr, FlowGraph::kValue);
734
735 return_instr->InputAt(0)->definition()->ReplaceUsesWith(box_instr);
736
737 {
738 // Check we constructed the right graph.
739 ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
740 EXPECT(cursor.TryMatch({
741 kMoveGlob,
742 kMatchAndMoveLoadThread,
743 kMatchAndMoveIntConverter,
744 kMatchAndMoveBox,
745 kMatchDartReturn,
746 }));
747 }
748
749 pipeline.RunForcedOptimizedAfterSSAPasses();
750
751 {
752#if !defined(PRODUCT) && !defined(USING_THREAD_SANITIZER)
753 SetFlagScope<bool> sfs(&FLAG_disassemble_optimized, true);
754#endif
755 pipeline.CompileGraphAndAttachFunction();
756 }
757
758 // Ensure we can successfully invoke the function.
759 invoke_result ^= Invoke(root_library, "myFunction");
760 intptr_t result_int = Integer::Cast(invoke_result).AsInt64Value();
  // The patched function now returns the current Thread* as an integer.
761 EXPECT_EQ(reinterpret_cast<intptr_t>(thread), result_int);
762}
763
764#if !defined(TARGET_ARCH_IA32)
// Checks CachableIdempotentCallInstr: replace the StaticCall inside
// cachedIncrement with a cacheable call so the target runs only once; the
// second run of multipleIncrement then observes a single extra increment
// (11) instead of ten more.
765ISOLATE_UNIT_TEST_CASE(IRTest_CachableIdempotentCall) {
766 // clang-format off
767 CStringUniquePtr kScript(OS::SCreate(nullptr, R"(
768 int globalCounter = 0;
769
770 int increment() => ++globalCounter;
771
772 int cachedIncrement() {
773 // We will replace this call with a cacheable call,
774 // which will lead to the counter no longer being incremented.
775 // Make sure to return the value, so we can see that the boxing and
776 // unboxing works as expected.
777 return increment();
778 }
779
780 int multipleIncrement() {
781 int returnValue = 0;
782 for(int i = 0; i < 10; i++) {
783 // Save the last returned value.
784 returnValue = cachedIncrement();
785 }
786 return returnValue;
787 }
788 )"));
789 // clang-format on
790
791 const auto& root_library = Library::Handle(LoadTestScript(kScript.get()));
792 const auto& first_result =
793 Object::Handle(Invoke(root_library, "multipleIncrement"));
794 EXPECT(first_result.IsSmi());
795 if (first_result.IsSmi()) {
796 const intptr_t int_value = Smi::Cast(first_result).Value();
797 EXPECT_EQ(10, int_value);
798 }
799
800 const auto& cached_increment_function =
801 Function::Handle(GetFunction(root_library, "cachedIncrement"));
802
803 const auto& increment_function =
804 Function::ZoneHandle(GetFunction(root_library, "increment"));
805
806 TestPipeline pipeline(cached_increment_function, CompilerPass::kJIT);
807 FlowGraph* flow_graph = pipeline.RunPasses({
808 CompilerPass::kComputeSSA,
809 });
810
  // Locate the StaticCall to `increment` that will be replaced.
811 StaticCallInstr* static_call = nullptr;
812 {
813 ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
814
815 EXPECT(cursor.TryMatch({
816 kMoveGlob,
817 {kMatchAndMoveStaticCall, &static_call},
818 kMoveGlob,
819 kMatchDartReturn,
820 }));
821 }
822
  // NOTE(review): the line constructing |args| (listing line 823) and the
  // final constructor-argument line (listing line 827) were lost in
  // extraction.
824 CachableIdempotentCallInstr* call = new CachableIdempotentCallInstr(
825 InstructionSource(), kUnboxedAddress, increment_function,
826 static_call->type_args_len(), Array::empty_array(), std::move(args),
828 static_call->ReplaceWith(call, nullptr);
829
830 pipeline.RunForcedOptimizedAfterSSAPasses();
831
832 {
833 ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
834
835 EXPECT(cursor.TryMatch({
836 kMoveGlob,
837 kMatchAndMoveCachableIdempotentCall,
838 kMoveGlob,
839 // The cacheable call returns unboxed, so select representations
840 // adds boxing.
841 kMatchBox,
842 kMoveGlob,
843 kMatchDartReturn,
844 }));
845 }
846
847 {
848#if !defined(PRODUCT)
849 SetFlagScope<bool> sfs(&FLAG_disassemble_optimized, true);
850#endif
851 pipeline.CompileGraphAndAttachFunction();
852 }
853
  // The call result was cached, so only one more increment is observed.
854 const auto& second_result =
855 Object::Handle(Invoke(root_library, "multipleIncrement"));
856 EXPECT(second_result.IsSmi());
857 if (second_result.IsSmi()) {
858 const intptr_t int_value = Smi::Cast(second_result).Value();
859 EXPECT_EQ(11, int_value);
860 }
861}
862#endif
863
864// Helper to set up an inlined FfiCall by replacing a StaticCall.
866 const compiler::ffi::CallMarshaller& marshaller,
867 uword native_entry,
868 bool is_leaf) {
  // NOTE(review): the signature head of this helper (listing line 865), two
  // lines closing the IntConverter construction (888-889), and the
  // RELEASE_ASSERT head at listing line 901 were lost in extraction.
869 FlowGraph* flow_graph = pipeline->RunPasses({CompilerPass::kComputeSSA});
870
871 {
872 // Locate the placeholder call.
873 StaticCallInstr* static_call = nullptr;
874 {
875 ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry(),
876 /*trace=*/false);
877 cursor.TryMatch({kMoveGlob, {kMatchStaticCall, &static_call}});
878 }
879 RELEASE_ASSERT(static_call != nullptr);
880
881 // Store the native entry as an unboxed constant and convert it to an
882 // untagged pointer for the FfiCall.
883 Zone* const Z = flow_graph->zone();
884 auto* const load_entry_point = new (Z) IntConverterInstr(
885 kUnboxedIntPtr, kUntagged,
886 new (Z) Value(flow_graph->GetConstant(
887 Integer::Handle(Z, Integer::NewCanonical(native_entry)),
890 flow_graph->InsertBefore(static_call, load_entry_point, /*env=*/nullptr,
891 FlowGraph::kValue);
892
893 // Make an FfiCall based on ffi_trampoline that calls our native function.
894 const intptr_t num_arguments =
895 FfiCallInstr::InputCountForMarshaller(marshaller);
896 RELEASE_ASSERT(num_arguments == 1);
897 InputsArray arguments(num_arguments);
898 arguments.Add(new (Z) Value(load_entry_point));
899 auto* const ffi_call = new (Z)
900 FfiCallInstr(DeoptId::kNone, marshaller, is_leaf, std::move(arguments));
902 ffi_call->InputAt(ffi_call->TargetAddressIndex())->definition() ==
903 load_entry_point);
904 flow_graph->InsertBefore(static_call, ffi_call, /*env=*/nullptr,
905 FlowGraph::kEffect);
906
907 // Remove the placeholder call.
908 static_call->RemoveFromGraph(/*return_previous=*/false);
909 }
910
911 // Run remaining relevant compiler passes.
912 pipeline->RunAdditionalPasses({
913 CompilerPass::kApplyICData,
914 CompilerPass::kTryOptimizePatterns,
915 CompilerPass::kSetOuterInliningId,
916 CompilerPass::kTypePropagation,
917 // Skipping passes that don't seem to do anything for this test.
918 CompilerPass::kSelectRepresentations,
919 // Skipping passes that don't seem to do anything for this test.
920 CompilerPass::kTypePropagation,
921 CompilerPass::kRangeAnalysis,
922 // Skipping passes that don't seem to do anything for this test.
923 CompilerPass::kFinalizeGraph,
924 CompilerPass::kCanonicalize,
925 CompilerPass::kAllocateRegisters,
926 CompilerPass::kReorderBlocks,
927 });
928
929 return flow_graph;
930}
931
932// Test that FFI calls spill all live values to the stack, and that FFI leaf
933// calls are free to use available ABI callee-save registers to avoid spilling.
934// Additionally test that register allocation is done correctly by clobbering
935// all volatile registers in the native function being called.
936ISOLATE_UNIT_TEST_CASE(IRTest_FfiCallInstrLeafDoesntSpill) {
937 const char* kScript = R"(
938 import 'dart:ffi';
939
940 // This is purely a placeholder and is never called.
941 void placeholder() {}
942
943 // Will call the "doFfiCall" and exercise its code.
944 bool invokeDoFfiCall() {
945 final double result = doFfiCall(1, 2, 3, 1.0, 2.0, 3.0);
946 if (result != (2 + 3 + 4 + 2.0 + 3.0 + 4.0)) {
947 throw 'Failed. Result was $result.';
948 }
949 return true;
950 }
951
952 // Will perform a "C" call while having live values in registers
953 // across the FfiCall.
954 double doFfiCall(int a, int b, int c, double x, double y, double z) {
955 // Ensure there is at least one live value in a register.
956 a += 1;
957 b += 1;
958 c += 1;
959 x += 1.0;
960 y += 1.0;
961 z += 1.0;
962 // We'll replace this StaticCall with an FfiCall.
963 placeholder();
964 // Use the live value.
965 return (a + b + c + x + y + z);
966 }
967
968 // FFI trampoline function.
969 typedef NT = Void Function();
970 typedef DT = void Function();
971 Pointer<NativeFunction<NT>> ptr = Pointer.fromAddress(0);
972 DT getFfiTrampolineClosure() => ptr.asFunction(isLeaf:true);
973 )";
974
975 const auto& root_library = Library::Handle(LoadTestScript(kScript));
976
977 // Build a "C" function that we can actually invoke.
978 auto& c_function = Instructions::Handle(
// NOTE(review): this listing omits original line 979, which presumably opened
// the assembler lambda that emits the native code below — confirm against the
// actual source file.
980 // Clobber all volatile registers to make sure caller doesn't rely on
981 // any non-callee-save register.
982 for (intptr_t reg = 0; reg < kNumberOfFpuRegisters; reg++) {
983 if ((kAbiVolatileFpuRegs & (1 << reg)) != 0) {
984#if defined(TARGET_ARCH_ARM)
985 // On ARM we need an extra scratch register for LoadDImmediate.
986 assembler->LoadDImmediate(static_cast<DRegister>(reg), 0.0, R3);
987#else
988 assembler->LoadDImmediate(static_cast<FpuRegister>(reg), 0.0);
989#endif
990 }
991 }
992 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
993 if ((kDartVolatileCpuRegs & (1 << reg)) != 0) {
994 assembler->LoadImmediate(static_cast<Register>(reg), 0xDEADBEEF);
995 }
996 }
997 assembler->Ret();
998 }));
999 uword native_entry = c_function.EntryPoint();
1000
1001 // Get initial compilation done.
1002 Invoke(root_library, "invokeDoFfiCall");
1003
1004 const Function& do_ffi_call =
1005 Function::Handle(GetFunction(root_library, "doFfiCall"));
1006 RELEASE_ASSERT(!do_ffi_call.IsNull());
1007
1008 const auto& value = Closure::Handle(
1009 Closure::RawCast(Invoke(root_library, "getFfiTrampolineClosure")));
1010 RELEASE_ASSERT(value.IsClosure());
1011 const auto& ffi_trampoline =
1012 Function::ZoneHandle(Closure::Cast(value).function());
1013 RELEASE_ASSERT(!ffi_trampoline.IsNull());
1014
1015 // Construct the FFICallInstr from the trampoline matching our native
1016 // function.
1017 const char* error = nullptr;
1018 auto* const zone = thread->zone();
1019 const auto& c_signature =
1020 FunctionType::ZoneHandle(zone, ffi_trampoline.FfiCSignature());
1021 const auto marshaller_ptr = compiler::ffi::CallMarshaller::FromFunction(
1022 zone, ffi_trampoline, /*function_params_start_at=*/1, c_signature,
1023 &error);
1024 RELEASE_ASSERT(error == nullptr);
1025 RELEASE_ASSERT(marshaller_ptr != nullptr);
1026 const auto& marshaller = *marshaller_ptr;
1027
// Helper: builds the SSA graph for "doFfiCall" with the StaticCall replaced
// by an FfiCall (leaf or non-leaf), collects every ParallelMove preceding it
// for `verify` to inspect, then compiles and actually runs the function.
1028 const auto& compile_and_run =
1029 [&](bool is_leaf, std::function<void(ParallelMoveInstr*)> verify) {
1030 // Build the SSA graph for "doFfiCall"
1031 TestPipeline pipeline(do_ffi_call, CompilerPass::kJIT);
1032 FlowGraph* flow_graph =
1033 SetupFfiFlowgraph(&pipeline, marshaller, native_entry, is_leaf);
1034
1035 {
1036 ParallelMoveInstr* parallel_move = nullptr;
1037 ILMatcher cursor(flow_graph,
1038 flow_graph->graph_entry()->normal_entry(),
1039 /*trace=*/false);
1040 while (cursor.TryMatch(
1041 {kMoveGlob, {kMatchAndMoveParallelMove, &parallel_move}})) {
1042 verify(parallel_move);
1043 }
1044 }
1045
1046 // Finish the compilation and attach code so we can run it.
1047 pipeline.CompileGraphAndAttachFunction();
1048
1049 // Ensure we can successfully invoke the FFI call.
1050 auto& result = Object::Handle(Invoke(root_library, "invokeDoFfiCall"));
1051 RELEASE_ASSERT(result.IsBool());
1052 EXPECT(Bool::Cast(result).value());
1053 };
1054
1055 intptr_t num_cpu_reg_to_stack_nonleaf = 0;
1056 intptr_t num_cpu_reg_to_stack_leaf = 0;
1057 intptr_t num_fpu_reg_to_stack_nonleaf = 0;
1058 intptr_t num_fpu_reg_to_stack_leaf = 0;
1059
1060 // Test non-leaf spills live values.
1061 compile_and_run(/*is_leaf=*/false, [&](ParallelMoveInstr* parallel_move) {
1062 // TargetAddress is passed in register, live values are all spilled.
1063 for (int i = 0; i < parallel_move->NumMoves(); i++) {
1064 auto move = parallel_move->moves()[i];
1065 if (move->src_slot()->IsRegister() && move->dest_slot()->IsStackSlot()) {
1066 num_cpu_reg_to_stack_nonleaf++;
1067 } else if (move->src_slot()->IsFpuRegister() &&
1068 move->dest_slot()->IsDoubleStackSlot()) {
1069 num_fpu_reg_to_stack_nonleaf++;
1070 }
1071 }
1072 });
1073
1074 // Test leaf calls do not cause spills of live values.
1075 compile_and_run(/*is_leaf=*/true, [&](ParallelMoveInstr* parallel_move) {
1076 // TargetAddress is passed in registers, live values are not spilled and
1077 // remains in callee-save registers.
1078 for (int i = 0; i < parallel_move->NumMoves(); i++) {
1079 auto move = parallel_move->moves()[i];
1080 if (move->src_slot()->IsRegister() && move->dest_slot()->IsStackSlot()) {
1081 num_cpu_reg_to_stack_leaf++;
1082 } else if (move->src_slot()->IsFpuRegister() &&
1083 move->dest_slot()->IsDoubleStackSlot()) {
1084 num_fpu_reg_to_stack_leaf++;
1085 }
1086 }
1087 });
1088
1089 // We should have less moves to the stack (i.e. spilling) in leaf calls.
1090 EXPECT_LT(num_cpu_reg_to_stack_leaf, num_cpu_reg_to_stack_nonleaf);
1091 // We don't have volatile FPU registers on all platforms.
1092 const bool has_callee_save_fpu_regs =
1093 Utils::CountOneBitsWord(kAbiVolatileFpuRegs) <
1094 Utils::CountOneBitsWord(kAllFpuRegistersList);
1095 EXPECT(!has_callee_save_fpu_regs ||
1096 num_fpu_reg_to_stack_leaf < num_fpu_reg_to_stack_nonleaf);
1097}
1098
// Compiles `function_name` from `root_library` with the given pipeline mode
// and verifies the function body has been folded down to returning a single
// constant Smi equal to `expected_value`.
// NOTE(review): this listing omits original line 1101; per the file's
// function index the full signature also takes `CompilerPass::PipelineMode
// mode` (used below when constructing the pipeline).
1099static void TestConstantFoldToSmi(const Library& root_library,
1100 const char* function_name,
1102 intptr_t expected_value) {
1103 const auto& function =
1104 Function::Handle(GetFunction(root_library, function_name));
1105
1106 TestPipeline pipeline(function, mode);
1107 FlowGraph* flow_graph = pipeline.RunPasses({});
1108
1109 auto entry = flow_graph->graph_entry()->normal_entry();
1110 EXPECT(entry != nullptr);
1111
1112 DartReturnInstr* ret = nullptr;
1113
// Match the (possibly prefixed) return instruction, skipping parallel moves.
1114 ILMatcher cursor(flow_graph, entry, true, ParallelMovesHandling::kSkip);
1115 RELEASE_ASSERT(cursor.TryMatch({
1116 kMoveGlob,
1117 {kMatchDartReturn, &ret},
1118 }));
1119
// The returned definition must be a constant Smi with the expected value.
1120 ConstantInstr* constant = ret->value()->definition()->AsConstant();
1121 EXPECT(constant != nullptr);
1122 if (constant != nullptr) {
1123 const Object& value = constant->value();
1124 EXPECT(value.IsSmi());
1125 if (value.IsSmi()) {
1126 const intptr_t int_value = Smi::Cast(value).Value();
1127 EXPECT_EQ(expected_value, int_value);
1128 }
1129 }
1130}
1131
// Verifies that `int.bitLength` on compile-time constant receivers is folded
// to a constant Smi in both JIT and AOT pipelines.
1132ISOLATE_UNIT_TEST_CASE(ConstantFold_bitLength) {
1133 // clang-format off
1134 auto kScript = R"(
1135 b0() => 0. bitLength; // 0...00000
1136 b1() => 1. bitLength; // 0...00001
1137 b100() => 100. bitLength;
1138 b200() => 200. bitLength;
1139 bffff() => 0xffff. bitLength;
1140 m1() => (-1).bitLength; // 1...11111
1141 m2() => (-2).bitLength; // 1...11110
1142
1143 main() {
1144 b0();
1145 b1();
1146 b100();
1147 b200();
1148 bffff();
1149 m1();
1150 m2();
1151 }
1152 )";
1153 // clang-format on
1154
1155 const auto& root_library = Library::Handle(LoadTestScript(kScript));
1156 Invoke(root_library, "main");
1157
// Each helper is checked under both compilation modes.
1158 auto test = [&](const char* function, intptr_t expected) {
1159 TestConstantFoldToSmi(root_library, function, CompilerPass::kJIT, expected);
1160 TestConstantFoldToSmi(root_library, function, CompilerPass::kAOT, expected);
1161 };
1162
1163 test("b0", 0);
1164 test("b1", 1);
1165 test("b100", 7);
1166 test("b200", 8);
1167 test("bffff", 16);
1168 test("m1", 0);
1169 test("m2", 1);
1170}
1171
// Builds: array = _List.filled(param, 0); load = array.length; unbox; +1;
// box; return — canonicalizes the graph and checks whether the LoadField /
// Unbox chain is short-circuited straight to `param` only when unmatched
// representations are allowed.
// NOTE(review): this listing omits original line 1172 (the signature opener
// `static void TestRepresentationChangeDuringCanonicalization(`, per the
// file's function index), line 1175, and line 1202 (presumably the
// declaration of the `args` inputs array used below) — confirm against the
// actual source file.
1173 Thread* thread,
1174 bool allow_representation_change) {
1176
1177 const auto& lib = Library::Handle(Library::CoreLibrary());
1178 const Class& list_class =
1179 Class::Handle(lib.LookupClassAllowPrivate(Symbols::_List()));
1180 EXPECT(!list_class.IsNull());
1181 const Error& err = Error::Handle(list_class.EnsureIsFinalized(thread));
1182 EXPECT(err.IsNull());
1183 const Function& list_filled = Function::ZoneHandle(
1184 list_class.LookupFactoryAllowPrivate(Symbols::_ListFilledFactory()));
1185 EXPECT(!list_filled.IsNull());
1186
1187 CompilerState S(thread, /*is_aot=*/true, /*is_optimizing=*/true);
1188
1189 FlowGraphBuilderHelper H(/*num_parameters=*/1);
1190 H.AddVariable("param", AbstractType::ZoneHandle(Type::IntType()));
1191
1192 auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
1193
1194 Definition* param = nullptr;
1195 LoadFieldInstr* load = nullptr;
1196 UnboxInstr* unbox = nullptr;
1197 Definition* add = nullptr;
1198 {
1199 BlockBuilder builder(H.flow_graph(), normal_entry);
1200 param = builder.AddParameter(0, kUnboxedInt64);
1201
1203 args.Add(new Value(H.flow_graph()->constant_null()));
1204 args.Add(new Value(param));
1205 args.Add(new Value(H.IntConstant(0)));
// The call is marked as a known list constructor so canonicalization can
// forward its length argument to the Array_length load below.
1206 StaticCallInstr* array = builder.AddDefinition(new StaticCallInstr(
1207 InstructionSource(), list_filled, 1, Array::empty_array(),
1208 std::move(args), DeoptId::kNone, 0, ICData::kNoRebind));
1209 array->UpdateType(CompileType::FromCid(kArrayCid));
1210 array->SetResultType(thread->zone(), CompileType::FromCid(kArrayCid));
1211 array->set_is_known_list_constructor(true);
1212
1213 load = builder.AddDefinition(new LoadFieldInstr(
1214 new Value(array), Slot::Array_length(), InstructionSource()));
1215
1216 unbox = builder.AddDefinition(new UnboxInt64Instr(
1217 new Value(load), DeoptId::kNone, Instruction::kNotSpeculative));
1218
1219 add = builder.AddDefinition(new BinaryInt64OpInstr(
1220 Token::kADD, new Value(unbox), new Value(H.IntConstant(1)),
1221 S.GetNextDeoptId(), Instruction::kNotSpeculative));
1222
1223 Definition* box = builder.AddDefinition(new BoxInt64Instr(new Value(add)));
1224
1225 builder.AddReturn(new Value(box));
1226 }
1227
1228 H.FinishGraph();
1229
1230 if (!allow_representation_change) {
1231 H.flow_graph()->disallow_unmatched_representations();
1232 }
1233
1234 H.flow_graph()->Canonicalize();
1235
// With representation changes allowed, the add reads `param` directly;
// otherwise the Unbox(LoadField(...)) chain must be preserved.
1236 if (allow_representation_change) {
1237 EXPECT(add->InputAt(0)->definition() == param);
1238 } else {
1239 EXPECT(add->InputAt(0)->definition() == unbox);
1240 EXPECT(unbox->value()->definition() == load);
1241 }
1242}
1243
1244ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_RepresentationChange) {
// NOTE(review): body lines 1245-1246 are omitted by this listing; presumably
// they invoke TestRepresentationChangeDuringCanonicalization for both
// allow_representation_change values — confirm against the actual source.
1247}
1248
// NOTE(review): the body of `enum TypeDataField` (original lines 1249-1252)
// is omitted by this listing except for its closing brace; per the file's
// member index its enumerators are TypedDataBase_length,
// TypedDataView_offset_in_bytes, and TypedDataView_typed_data.
1253};
1254
// Builds a _Float32ArrayView over a freshly allocated typed-data array and
// checks that, after canonicalization, a LoadField of the selected view field
// is forwarded to the corresponding value passed to the view factory:
// length -> constant 1, offset_in_bytes -> constant 4, typed_data -> array.
// NOTE(review): this listing omits original line 1255 (the signature opener
// `static void TestCanonicalizationOfTypedDataViewFieldLoads(`, per the
// file's function index) as well as lines 1268/1270, the `case` labels of
// both switches (1274, 1277-1278, 1280-1281, 1316, 1319, 1322), 1291, and
// 1299 — confirm against the actual source file.
1256 Thread* thread,
1257 TypeDataField field_kind) {
1258 const auto& typed_data_lib = Library::Handle(Library::TypedDataLibrary());
1259 const auto& view_cls = Class::Handle(
1260 typed_data_lib.LookupClassAllowPrivate(Symbols::_Float32ArrayView()));
1261 const Error& err = Error::Handle(view_cls.EnsureIsFinalized(thread));
1262 EXPECT(err.IsNull());
1263 const auto& factory =
1264 Function::ZoneHandle(view_cls.LookupFactoryAllowPrivate(String::Handle(
1265 String::Concat(Symbols::_Float32ArrayView(), Symbols::DotUnder()))));
1266 EXPECT(!factory.IsNull());
1267
1269 CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
1271
// Select which view slot the LoadField below reads.
1272 const Slot* field = nullptr;
1273 switch (field_kind) {
1275 field = &Slot::TypedDataBase_length();
1276 break;
1279 break;
1282 break;
1283 }
1284
1285 auto b1 = H.flow_graph()->graph_entry()->normal_entry();
1286
1287 const auto constant_4 = H.IntConstant(4);
1288 const auto constant_1 = H.IntConstant(1);
1289
1290 Definition* array;
1292 DartReturnInstr* ret;
1293
1294 {
1295 BlockBuilder builder(H.flow_graph(), b1);
1296 // array <- AllocateTypedData(1)
1297 array = builder.AddDefinition(new AllocateTypedDataInstr(
1298 InstructionSource(), kTypedDataFloat64ArrayCid, new Value(constant_1),
1300 // view <- StaticCall(_Float32ArrayView._, null, array, 4, 1)
1301 const auto view = builder.AddDefinition(new StaticCallInstr(
1302 InstructionSource(), factory, 1, Array::empty_array(),
1303 {new Value(H.flow_graph()->constant_null()), new Value(array),
1304 new Value(constant_4), new Value(constant_1)},
1305 DeoptId::kNone, 1, ICData::RebindRule::kStatic));
1306 // array_alias <- LoadField(view.length)
1307 load = builder.AddDefinition(
1308 new LoadFieldInstr(new Value(view), *field, InstructionSource()));
1309 // Return(load)
1310 ret = builder.AddReturn(new Value(load));
1311 }
1312 H.FinishGraph();
1313 H.flow_graph()->Canonicalize();
1314
// After canonicalization the load must be forwarded to the factory argument
// that populated the requested field.
1315 switch (field_kind) {
1317 EXPECT_PROPERTY(ret->value()->definition(), &it == constant_1);
1318 break;
1320 EXPECT_PROPERTY(ret->value()->definition(), &it == constant_4);
1321 break;
1323 EXPECT_PROPERTY(ret->value()->definition(), &it == array);
1324 break;
1325 }
1326}
1327
1328ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_TypedDataViewFactory) {
// NOTE(review): body lines 1329-1333 are omitted by this listing; presumably
// they invoke TestCanonicalizationOfTypedDataViewFieldLoads once per
// TypeDataField enumerator — confirm against the actual source.
1334}
1335
1336// Check that canonicalize can devirtualize InstanceCall based on type
1337// information in AOT mode.
1338ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_InstanceCallWithNoICDataInAOT) {
1339 const auto& typed_data_lib = Library::Handle(Library::TypedDataLibrary());
1340 const auto& view_cls = Class::Handle(typed_data_lib.LookupClassAllowPrivate(
1341 String::Handle(Symbols::New(thread, "_TypedListBase"))));
1342 const Error& err = Error::Handle(view_cls.EnsureIsFinalized(thread));
1343 EXPECT(err.IsNull());
1344 const auto& getter = Function::Handle(
1345 view_cls.LookupFunctionAllowPrivate(Symbols::GetLength()));
1346 EXPECT(!getter.IsNull());
1347
// NOTE(review): this listing omits original lines 1348 and 1350, presumably
// including the FlowGraphBuilderHelper `H` declaration used below — confirm
// against the actual source file.
1349 CompilerState S(thread, /*is_aot=*/true, /*is_optimizing=*/true);
1351
1352 auto b1 = H.flow_graph()->graph_entry()->normal_entry();
1353
1354 InstanceCallInstr* length_call;
1355 DartReturnInstr* ret;
1356
1357 {
1358 BlockBuilder builder(H.flow_graph(), b1);
1359 // array <- AllocateTypedData(1)
1360 const auto array = builder.AddDefinition(new AllocateTypedDataInstr(
1361 InstructionSource(), kTypedDataFloat64ArrayCid,
1362 new Value(H.IntConstant(1)), DeoptId::kNone));
1363 // length_call <- InstanceCall('get:length', array, ICData[])
1364 length_call = builder.AddDefinition(new InstanceCallInstr(
1365 InstructionSource(), Symbols::GetLength(), Token::kGET,
1366 /*args=*/{new Value(array)}, 0, Array::empty_array(), 1,
1367 /*deopt_id=*/42));
1368 length_call->EnsureICData(H.flow_graph());
1369 // Return(load)
1370 ret = builder.AddReturn(new Value(length_call));
1371 }
1372 H.FinishGraph();
1373 H.flow_graph()->Canonicalize();
1374
// The InstanceCall must be removed from the graph and replaced with a
// StaticCall to the resolved get:length of _TypedListBase.
1375 EXPECT_PROPERTY(length_call, it.previous() == nullptr);
1376 EXPECT_PROPERTY(ret->value()->definition(), it.IsStaticCall());
1377 EXPECT_PROPERTY(ret->value()->definition()->AsStaticCall(),
1378 it.function().ptr() == getter.ptr());
1379}
1380
// Builds LoadClassId(param) fed into TestRange(lower, upper) for a single
// parameter of static type `type`, canonicalizes the graph, and expects the
// whole range test to fold to the boolean constant `result`.
// NOTE(review): this listing omits original line 1381 (the signature opener
// `static void TestTestRangeCanonicalize(const AbstractType& type,`, per the
// file's function index) and lines 1385/1405 — confirm against the source.
1382 uword lower,
1383 uword upper,
1384 bool result) {
1386 CompilerState S(Thread::Current(), /*is_aot=*/true, /*is_optimizing=*/true);
1387 FlowGraphBuilderHelper H(/*num_parameters=*/1);
1388 H.AddVariable("v0", type);
1389
1390 auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
1391
1392 DartReturnInstr* ret;
1393 {
1394 BlockBuilder builder(H.flow_graph(), normal_entry);
1395 Definition* param = builder.AddParameter(0, kTagged);
1396 Definition* load_cid =
1397 builder.AddDefinition(new LoadClassIdInstr(new Value(param)));
1398 Definition* test_range = builder.AddDefinition(new TestRangeInstr(
1399 InstructionSource(), new Value(load_cid), lower, upper, kTagged));
1400 ret = builder.AddReturn(new Value(test_range));
1401 }
1402 H.FinishGraph();
1403 H.flow_graph()->Canonicalize();
1404
// The return value must be bound to the expected Bool constant.
1406 EXPECT_PROPERTY(ret,
1407 it.value()->BoundConstant().ptr() == Bool::Get(result).ptr());
1408}
1409
// Exercises TestRange canonicalization with class-id ranges that the static
// type either fully excludes, fully satisfies, or fully misses.
1410ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_TestRange) {
1411 HierarchyInfo hierarchy_info(thread);
1412 TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::IntType()),
1413 kOneByteStringCid, kTwoByteStringCid, false);
1414 TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::IntType()), kSmiCid,
1415 kMintCid, true);
1416 TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::NullType()), kSmiCid,
1417 kMintCid, false);
1418 TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::Double()), kSmiCid,
1419 kMintCid, false);
1420 TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::ObjectType()), 1,
1421 kClassIdTagMax, true);
1422}
1423
// Builds AllocateObject(test_cls) followed by 0-2 StoreFields of `field`
// (42 then 24) and a LoadField, then canonicalizes and checks whether the
// load was forwarded to the first stored constant (42) as indicated by
// `expected_to_forward`.
// NOTE(review): this listing omits original line 1424 (presumably the
// signature opener `static void TestStaticFieldForwarding(Thread* thread,`)
// and lines 1431/1433/1439/1446/1452/1461/1468 — confirm against the source.
1425 const Class& test_cls,
1426 const Field& field,
1427 intptr_t num_stores,
1428 bool expected_to_forward) {
1429 EXPECT(num_stores <= 2);
1430
1432 CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
1434
1435 auto b1 = H.flow_graph()->graph_entry()->normal_entry();
1436
1437 const auto constant_42 = H.IntConstant(42);
1438 const auto constant_24 = H.IntConstant(24);
1440 DartReturnInstr* ret;
1441
1442 {
1443 BlockBuilder builder(H.flow_graph(), b1);
1444 // obj <- AllocateObject(TestClass)
1445 const auto obj = builder.AddDefinition(
1447
1448 if (num_stores >= 1) {
1449 // StoreField(o.field = 42)
1450 builder.AddInstruction(new StoreFieldInstr(
1451 field, new Value(obj), new Value(constant_42),
1453 &H.flow_graph()->parsed_function(),
1454 StoreFieldInstr::Kind::kInitializing));
1455 }
1456
1457 if (num_stores >= 2) {
1458 // StoreField(o.field = 24)
1459 builder.AddInstruction(new StoreFieldInstr(
1460 field, new Value(obj), new Value(constant_24),
1462 &H.flow_graph()->parsed_function()));
1463 }
1464
1465 // load <- LoadField(view.field)
1466 load = builder.AddDefinition(new LoadFieldInstr(
1467 new Value(obj), Slot::Get(field, &H.flow_graph()->parsed_function()),
1469
1470 // Return(load)
1471 ret = builder.AddReturn(new Value(load));
1472 }
1473 H.FinishGraph();
1474 H.flow_graph()->Canonicalize();
1475
1476 if (expected_to_forward) {
1477 EXPECT_PROPERTY(ret->value()->definition(), &it == constant_42);
1478 } else {
1479 EXPECT_PROPERTY(ret->value()->definition(), &it == load);
1480 }
1481}
1482
// Checks that a LoadField is forwarded to the stored value only for a final
// field with exactly one initializing store; late-final and non-final fields,
// or multiple/zero stores, must keep the load.
1483ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_FinalFieldForwarding) {
1484 const char* script_chars = R"(
1485 import 'dart:typed_data';
1486
1487 class TestClass {
1488 final dynamic finalField;
1489 late final dynamic lateFinalField;
1490 dynamic normalField;
1491
1492 TestClass(this.finalField, this.lateFinalField, this.normalField);
1493 }
1494 )";
1495 const auto& lib = Library::Handle(LoadTestScript(script_chars));
1496
1497 const auto& test_cls = Class::ZoneHandle(
1498 lib.LookupClass(String::Handle(Symbols::New(thread, "TestClass"))));
1499 const auto& err = Error::Handle(test_cls.EnsureIsFinalized(thread));
1500 EXPECT(err.IsNull());
1501
// Fields are cloned from the originals, matching what the compiler sees.
1502 const auto lookup_field = [&](const char* name) -> const Field& {
1503 const auto& original_field = Field::Handle(
1504 test_cls.LookupField(String::Handle(Symbols::New(thread, name))));
1505 EXPECT(!original_field.IsNull());
1506 return Field::Handle(original_field.CloneFromOriginal());
1507 };
1508
1509 const auto& final_field = lookup_field("finalField");
1510 const auto& late_final_field = lookup_field("lateFinalField");
1511 const auto& normal_field = lookup_field("normalField");
1512
// Only a final field with a single initializing store may be forwarded.
1513 TestStaticFieldForwarding(thread, test_cls, final_field, /*num_stores=*/0,
1514 /*expected_to_forward=*/false);
1515 TestStaticFieldForwarding(thread, test_cls, final_field, /*num_stores=*/1,
1516 /*expected_to_forward=*/true);
1517 TestStaticFieldForwarding(thread, test_cls, final_field, /*num_stores=*/2,
1518 /*expected_to_forward=*/false);
1519
1520 TestStaticFieldForwarding(thread, test_cls, late_final_field,
1521 /*num_stores=*/0, /*expected_to_forward=*/false);
1522 TestStaticFieldForwarding(thread, test_cls, late_final_field,
1523 /*num_stores=*/1, /*expected_to_forward=*/false);
1524 TestStaticFieldForwarding(thread, test_cls, late_final_field,
1525 /*num_stores=*/2, /*expected_to_forward=*/false);
1526
1527 TestStaticFieldForwarding(thread, test_cls, normal_field, /*num_stores=*/0,
1528 /*expected_to_forward=*/false);
1529 TestStaticFieldForwarding(thread, test_cls, normal_field, /*num_stores=*/1,
1530 /*expected_to_forward=*/false);
1531 TestStaticFieldForwarding(thread, test_cls, normal_field, /*num_stores=*/2,
1532 /*expected_to_forward=*/false);
1533}
1534
// Packs the variadic handle arguments into an Array and invokes `function`
// through DartEntry, returning the raw result object (or error).
// NOTE(review): this listing omits original line 1536; per the file's
// function index the signature is
// `static ObjectPtr InvokeFunction(const Function& function, Args&... args)`.
1535template <typename... Args>
1537 const Array& args_array = Array::Handle(Array::New(sizeof...(Args)));
1538 intptr_t i = 0;
// Fold expression stores each argument at successive indices.
1539 (args_array.SetAt(i++, args), ...);
1540 return DartEntry::InvokeFunction(function, args_array);
1541}
1542
// Builds a flow graph via `build_graph`, runs it through the tail of the AOT
// pipeline (finalize, reorder, regalloc), attaches compiled code, and returns
// the resulting invocable Function.
// NOTE(review): this listing omits original line 1543 (presumably the
// signature opener, returning `const Function&` per the file's function
// index) and line 1546 — confirm against the actual source file.
1544 intptr_t num_parameters,
1545 std::function<void(FlowGraphBuilderHelper&)> build_graph) {
1547
1548 TestPipeline pipeline(CompilerPass::kAOT, [&]() {
1549 FlowGraphBuilderHelper H(num_parameters);
1550 build_graph(H);
1551 H.FinishGraph();
1552 return H.flow_graph();
1553 });
1554 auto flow_graph = pipeline.RunPasses({
1555 CompilerPass::kFinalizeGraph,
1556 CompilerPass::kReorderBlocks,
1557 CompilerPass::kAllocateRegisters,
1558 });
1559 pipeline.CompileGraphAndAttachFunction();
1560 return flow_graph->function();
1561}
1562
// Whether the TestInt comparison under test is consumed as a branch
// condition or materialized as a boolean value.
// NOTE(review): this listing omits original line 1564, the `kTestBranch`
// enumerator (its use is visible below in BuildTestIntFunction).
1563enum class TestIntVariant {
1565 kTestValue,
1566};
1567
// Builds and compiles a function computing `(lhs & rhs) ==/!= 0` via
// TestIntInstr in representation `rep`. If `immediate_mask` is set, rhs is
// that constant and the function takes one parameter; otherwise rhs is a
// second parameter. The comparison is consumed either as a value or as a
// branch, depending on `test_variant`.
// NOTE(review): this listing omits original line 1568 (the signature opener
// `static const Function& BuildTestIntFunction(`, per the file's function
// index) and line 1574 — confirm against the actual source file.
1569 Zone* zone,
1570 TestIntVariant test_variant,
1571 bool eq_zero,
1572 Representation rep,
1573 std::optional<int64_t> immediate_mask) {
1575 return BuildTestFunction(
1576 /*num_parameters=*/1 + (!immediate_mask.has_value() ? 1 : 0),
1577 [&](auto& H) {
1578 H.AddVariable("lhs", AbstractType::ZoneHandle(Type::IntType()),
1579 new CompileType(CompileType::Int()));
1580 if (!immediate_mask.has_value()) {
1581 H.AddVariable("rhs", AbstractType::ZoneHandle(Type::IntType()),
1582 new CompileType(CompileType::Int()));
1583 }
1584
1585 auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
1586 auto true_successor = H.TargetEntry();
1587 auto false_successor = H.TargetEntry();
1588
1589 {
1590 BlockBuilder builder(H.flow_graph(), normal_entry);
1591 Definition* lhs = builder.AddParameter(0);
1592 Definition* rhs = immediate_mask.has_value()
1593 ? H.IntConstant(immediate_mask.value(), rep)
1594 : builder.AddParameter(1);
// Unbox operands whose current representation does not match `rep`.
1595 if (rep != lhs->representation()) {
1596 lhs =
1597 builder.AddUnboxInstr(kUnboxedInt64, lhs, /*is_checked=*/false);
1598 }
1599 if (rep != rhs->representation()) {
1600 rhs =
1601 builder.AddUnboxInstr(kUnboxedInt64, rhs, /*is_checked=*/false);
1602 }
1603
1604 auto comparison = new TestIntInstr(
1605 InstructionSource(), eq_zero ? Token::kEQ : Token::kNE, rep,
1606 new Value(lhs), new Value(rhs));
1607
1608 if (test_variant == TestIntVariant::kTestValue) {
1609 auto v2 = builder.AddDefinition(comparison);
1610 builder.AddReturn(new Value(v2));
1611 } else {
1612 builder.AddBranch(comparison, true_successor, false_successor);
1613 }
1614 }
1615
// In the branch variant each successor returns the matching Bool constant.
1616 if (test_variant == TestIntVariant::kTestBranch) {
1617 {
1618 BlockBuilder builder(H.flow_graph(), true_successor);
1619 builder.AddReturn(
1620 new Value(H.flow_graph()->GetConstant(Bool::True())));
1621 }
1622
1623 {
1624 BlockBuilder builder(H.flow_graph(), false_successor);
1625 builder.AddReturn(
1626 new Value(H.flow_graph()->GetConstant(Bool::False())));
1627 }
1628 }
1629 });
1630}
1631
// Compiles the TestInt function with `mask` baked in as an immediate and
// verifies, for every input, that the compiled result matches the reference
// computation ((input & mask) == 0) == eq_zero.
// NOTE(review): this listing omits original line 1632 (the signature opener
// `static void TestIntTestWithImmediate(Zone* zone,`, per the file's
// function index) — confirm against the actual source file.
1633 TestIntVariant test_variant,
1634 bool eq_zero,
1635 Representation rep,
1636 const std::vector<int64_t>& inputs,
1637 int64_t mask) {
1638 const auto& func =
1639 BuildTestIntFunction(zone, test_variant, eq_zero, rep, mask);
1640 auto invoke = [&](int64_t v) -> bool {
1641 const auto& input = Integer::Handle(Integer::New(v));
// Tagged-representation tests must only receive Smi-range inputs.
1642 EXPECT(rep == kUnboxedInt64 || input.IsSmi());
1643 const auto& result = Bool::CheckedHandle(zone, InvokeFunction(func, input));
1644 return result.value();
1645 };
1646
1647 for (auto& input : inputs) {
1648 const auto expected = ((input & mask) == 0) == eq_zero;
1649 const auto got = invoke(input);
1650 if (expected != got) {
1651 FAIL("testing [%s] [%s] %" Px64 " & %" Px64
1652 " %s 0: expected %s but got %s\n",
1653 test_variant == TestIntVariant::kTestBranch ? "branch" : "value",
1654 RepresentationUtils::ToCString(rep), input, mask,
1655 eq_zero ? "==" : "!=", expected ? "true" : "false",
1656 got ? "true" : "false");
1657 }
1658 }
1659}
1660
// Cross-product driver: for each mask, exercises the TestInt function both
// with the mask as an immediate and (when the mask is representable) as a
// second runtime operand, comparing against the reference computation.
1661static void TestIntTest(Zone* zone,
1662 TestIntVariant test_variant,
1663 bool eq_zero,
1664 Representation rep,
1665 const std::vector<int64_t>& inputs,
1666 const std::vector<int64_t>& masks) {
1667 if (!TestIntInstr::IsSupported(rep)) {
1668 return;
1669 }
1670
// Build the two-parameter (non-immediate) variant once and reuse it.
1671 const auto& func = BuildTestIntFunction(zone, test_variant, eq_zero, rep, {});
1672 auto invoke = [&](int64_t lhs, int64_t mask) -> bool {
1673 const auto& arg0 = Integer::Handle(Integer::New(lhs));
1674 const auto& arg1 = Integer::Handle(Integer::New(mask));
1675 EXPECT(rep == kUnboxedInt64 || arg0.IsSmi());
1676 EXPECT(rep == kUnboxedInt64 || arg1.IsSmi());
1677 const auto& result =
1678 Bool::CheckedHandle(zone, InvokeFunction(func, arg0, arg1));
1679 return result.value();
1680 };
1681
1682 for (auto& mask : masks) {
1683 TestIntTestWithImmediate(zone, test_variant, eq_zero, rep, inputs, mask);
1684
1685 // We allow non-Smi masks as immediates but not as non-constant operands.
1686 if (rep == kTagged && !Smi::IsValid(mask)) {
1687 continue;
1688 }
1689
1690 for (auto& input : inputs) {
1691 const auto expected = ((input & mask) == 0) == eq_zero;
1692 const auto got = invoke(input, mask);
1693 if (expected != got) {
1694 FAIL("testing [%s] [%s] %" Px64 " & %" Px64
1695 " %s 0: expected %s but got %s\n",
1696 test_variant == TestIntVariant::kTestBranch ? "branch" : "value",
1697 RepresentationUtils::ToCString(rep), input, mask,
1698 eq_zero ? "==" : "!=", expected ? "true" : "false",
1699 got ? "true" : "false");
1700 }
1701 }
1702 }
1703}
1704
// End-to-end coverage of TestIntInstr: both consumption variants, both
// polarities, tagged and unboxed representations, with masks and inputs
// chosen around the Smi sign bit and the int64 MSB.
1705ISOLATE_UNIT_TEST_CASE(IL_TestIntInstr) {
1706 const int64_t msb = static_cast<int64_t>(0x8000000000000000L);
1707 const int64_t kSmiSignBit = kSmiMax + 1;
1708
1709 const std::initializer_list<int64_t> kMasks = {
1710 1, 2, kSmiSignBit, kSmiSignBit | 1, msb, msb | 1};
1711
// Tagged inputs stay Smi-representable; unboxed inputs also probe the MSB.
1712 const std::vector<std::pair<Representation, std::vector<int64_t>>> kValues = {
1713 {kTagged,
1714 {-2, -1, 0, 1, 2, 3, kSmiMax & ~1, kSmiMin & ~1, kSmiMax | 1,
1715 kSmiMin | 1}},
1716 {kUnboxedInt64,
1717 {-2, -1, 0, 1, 2, 3, kSmiMax & ~1, kSmiMin & ~1, kSmiMax | 1,
1718 kSmiMin | 1, msb, msb | 1, msb | 2}},
1719 };
1720
1721 for (auto test_variant :
1722 {TestIntVariant::kTestBranch, TestIntVariant::kTestValue}) {
1723 for (auto eq_zero : {true, false}) {
1724 for (auto& [rep, values] : kValues) {
1725 TestIntTest(thread->zone(), test_variant, eq_zero, rep, values, kMasks);
1726 }
1727 }
1728 }
1729}
1730
1731// This is a smoke test which verifies that RecordCoverage instruction is not
1732// accidentally removed by some overly eager optimization.
1733ISOLATE_UNIT_TEST_CASE(IL_RecordCoverageSurvivesOptimizations) {
// NOTE(review): this listing omits original line 1734 — presumably another
// SetFlagScope guarding a coverage-related flag; confirm against the source.
1735 SetFlagScope<bool> sfs(&FLAG_reorder_basic_blocks, false);
1736
1737 TestPipeline pipeline(CompilerPass::kJIT, [&]() {
1738 FlowGraphBuilderHelper H(/*num_parameters=*/0);
1739
1740 {
1741 BlockBuilder builder(H.flow_graph(),
1742 H.flow_graph()->graph_entry()->normal_entry());
1743 const auto& coverage_array = Array::Handle(Array::New(1));
1744 coverage_array.SetAt(0, Smi::Handle(Smi::New(0)));
1745 builder.AddInstruction(
1746 new RecordCoverageInstr(coverage_array, 0, InstructionSource()));
1747 builder.AddReturn(new Value(H.flow_graph()->constant_null()));
1748 }
1749
1750 H.FinishGraph();
1751 return H.flow_graph();
1752 });
1753
1754 auto flow_graph = pipeline.RunPasses({});
1755
1756 // RecordCoverage instruction should remain in the graph.
1757 EXPECT(flow_graph->graph_entry()->normal_entry()->next()->IsRecordCoverage());
1758}
1759
1760} // namespace dart
#define test(name)
static void test_range(skiatest::Reporter *reporter)
Definition: RandomTest.cpp:167
static sk_sp< Effect > Create()
Definition: RefCntTest.cpp:117
SI void store(P *ptr, const T &val)
SI D cast(const S &v)
SI T load(const P *ptr)
Definition: Transform_inl.h:98
#define EXPECT(type, expectedAlignment, expectedSize)
Vec2Value v2
#define RELEASE_ASSERT(cond)
Definition: assert.h:327
#define Z
GLenum type
void SetAt(intptr_t index, const Object &value) const
Definition: object.h:10880
void AddArray(const BaseGrowableArray< T, B, Allocator > &src)
void Add(const T &value)
bool Done() const
Definition: flow_graph.h:46
static const Bool & True()
Definition: object.h:10797
ClassPtr At(intptr_t cid) const
Definition: class_table.h:362
intptr_t NumCids() const
Definition: class_table.h:447
bool HasValidClassAt(intptr_t cid) const
Definition: class_table.h:386
ErrorPtr EnsureIsFinalized(Thread *thread) const
Definition: object.cc:4924
FunctionPtr LookupFactoryAllowPrivate(const String &name) const
Definition: object.cc:6163
Value * value() const
Definition: il.h:3486
PRINT_OPERANDS_TO_SUPPORT PRINT_TO_SUPPORT bool UpdateType(CompileType new_type)
Definition: il.h:2553
void set_range(const Range &)
static constexpr intptr_t kNone
Definition: deopt_id.h:27
void Fail(const char *format,...) const PRINTF_ATTRIBUTE(2
GraphEntryInstr * graph_entry() const
Definition: flow_graph.h:268
ConstantInstr * GetConstant(const Object &object, Representation representation=kTagged)
Definition: flow_graph.cc:187
Zone * zone() const
Definition: flow_graph.h:261
const Function & function() const
Definition: flow_graph.h:130
BlockIterator reverse_postorder_iterator() const
Definition: flow_graph.h:219
void InsertBefore(Instruction *next, Instruction *instr, Environment *env, UseKind use_kind)
Definition: flow_graph.h:312
FunctionEntryInstr * normal_entry() const
Definition: il.h:2001
const CidRangeVector & SubtypeRangesForClass(const Class &klass, bool include_abstract, bool exclude_null)
Definition: il.cc:110
bool CanUseSubtypeRangeCheckFor(const AbstractType &type)
Definition: il.cc:301
bool TryMatch(std::initializer_list< MatchCode > match_codes, MatchOpCode insert_before=kInvalidMatchOpCode)
void EnsureICData(FlowGraph *graph)
Definition: il.cc:5240
virtual Value * InputAt(intptr_t i) const =0
virtual bool ComputeCanDeoptimize() const =0
bool Equals(const Instruction &other) const
Definition: il.cc:619
void SetInputAt(intptr_t i, Value *value)
Definition: il.h:1014
Instruction * RemoveFromGraph(bool return_previous=true)
Definition: il.cc:1301
Instruction * previous() const
Definition: il.h:1087
ObjectPtr ptr() const
Definition: object.h:332
bool IsNull() const
Definition: object.h:363
static Object & Handle()
Definition: object.h:407
static Object & ZoneHandle()
Definition: object.h:419
void SetResultType(Zone *zone, CompileType new_type)
Definition: il.h:5648
void set_is_known_list_constructor(bool value)
Definition: il.h:5662
void RunAdditionalPasses(std::initializer_list< CompilerPass::Id > passes)
FlowGraph * RunPasses(std::initializer_list< CompilerPass::Id > passes)
Zone * zone() const
Definition: thread_state.h:37
Value * value() const
Definition: il.h:8678
Definition: il.h:75
bool BindsToConstant() const
Definition: il.cc:1183
bool Equals(const Value &other) const
Definition: il.cc:633
const Object & BoundConstant() const
Definition: il.cc:1201
Definition * definition() const
Definition: il.h:103
void LoadDImmediate(DRegister dd, double value, Register scratch, Condition cond=AL)
void Ret(Condition cond=AL)
void LoadImmediate(Register rd, Immediate value, Condition cond=AL)
#define H
#define THR_Print(format,...)
Definition: log.h:20
#define FAIL(name, result)
#define ASSERT(E)
if(end==-1)
G_BEGIN_DECLS G_MODULE_EXPORT FlValue * args
static const char * expected_value
const uint8_t uint32_t uint32_t GError ** error
uint8_t value
GAsyncResult * result
Dart_NativeFunction function
Definition: fuchsia.cc:51
#define RANGES_CONTAIN_EXPECTED_CIDS(ranges, cids)
Definition: il_test.cc:499
#define EXPECT_PROPERTY(entity, property)
const GrXPFactory * Get(SkBlendMode mode)
bool IsSupported(const SkMaskFilter *maskfilter)
InvalidClass kSmiMax
const Type & ObjectType()
Definition: runtime_api.cc:173
const Type & IntType()
Definition: runtime_api.cc:181
const Type & DynamicType()
Definition: runtime_api.cc:169
InvalidClass kSmiMin
Definition: dart_vm.cc:33
constexpr int16_t kMaxInt16
Definition: globals.h:480
LibraryPtr LoadTestScript(const char *script, Dart_NativeEntryResolver resolver, const char *lib_uri)
static void WriteCidRangeVectorTo(const CidRangeVector &ranges, BaseTextBuffer *buffer)
Definition: il_test.cc:440
static const Function & BuildTestIntFunction(Zone *zone, TestIntVariant test_variant, bool eq_zero, Representation rep, std::optional< int64_t > immediate_mask)
Definition: il_test.cc:1568
@ kNew
Definition: heap_test.cc:891
TypeDataField
Definition: il_test.cc:1249
@ TypedDataView_offset_in_bytes
Definition: il_test.cc:1251
@ TypedDataBase_length
Definition: il_test.cc:1250
@ TypedDataView_typed_data
Definition: il_test.cc:1252
const RegList kAllFpuRegistersList
constexpr int32_t kMinInt32
Definition: globals.h:482
static void TestIntTest(Zone *zone, TestIntVariant test_variant, bool eq_zero, Representation rep, const std::vector< int64_t > &inputs, const std::vector< int64_t > &masks)
Definition: il_test.cc:1661
static void TestTestRangeCanonicalize(const AbstractType &type, uword lower, uword upper, bool result)
Definition: il_test.cc:1381
@ kNoStoreBarrier
Definition: il.h:6301
static void TestRepresentationChangeDuringCanonicalization(Thread *thread, bool allow_representation_change)
Definition: il_test.cc:1172
@ kNullCid
Definition: class_id.h:252
@ kVoidCid
Definition: class_id.h:254
@ kDynamicCid
Definition: class_id.h:253
@ kNeverCid
Definition: class_id.h:255
Representation
Definition: locations.h:66
constexpr uint32_t kMaxUint32
Definition: globals.h:484
GrowableArray< Value * > InputsArray
Definition: il.h:901
static void TestCanonicalizationOfTypedDataViewFieldLoads(Thread *thread, TypeDataField field_kind)
Definition: il_test.cc:1255
ObjectPtr Invoke(const Library &lib, const char *name)
FunctionPtr GetFunction(const Library &lib, const char *name)
static void RunInitializingStoresTest(const Library &root_library, const char *function_name, CompilerPass::PipelineMode mode, const std::vector< const char * > &expected_stores)
Definition: il_test.cc:114
static ObjectPtr InvokeFunction(const Function &function, Args &... args)
Definition: il_test.cc:1536
uintptr_t uword
Definition: globals.h:501
bool TestIntConverterCanonicalizationRule(Thread *thread, int64_t min_value, int64_t max_value, Representation initial, Representation intermediate, Representation final)
Definition: il_test.cc:197
static void TestNullAwareEqualityCompareCanonicalization(Thread *thread, bool allow_representation_change)
Definition: il_test.cc:397
@ kNumberOfCpuRegisters
Definition: constants_arm.h:98
const int kNumberOfFpuRegisters
static const char * Concat(const char *a, const char *b)
Definition: file_test.cc:86
static void TestConstantFoldToSmi(const Library &root_library, const char *function_name, CompilerPass::PipelineMode mode, intptr_t expected_value)
Definition: il_test.cc:1099
static void ExpectStores(FlowGraph *flow_graph, const std::vector< const char * > &expected_stores)
Definition: il_test.cc:97
ISOLATE_UNIT_TEST_CASE(StackAllocatedDestruction)
static void TestIntTestWithImmediate(Zone *zone, TestIntVariant test_variant, bool eq_zero, Representation rep, const std::vector< int64_t > &inputs, int64_t mask)
Definition: il_test.cc:1632
const RegList kDartVolatileCpuRegs
const intptr_t cid
static void RangesContainExpectedCids(const Expect &expect, const CidRangeVector &ranges, const GrowableArray< intptr_t > &expected)
Definition: il_test.cc:471
static constexpr Representation kUnboxedAddress
Definition: locations.h:182
constexpr int32_t kMaxInt32
Definition: globals.h:483
static void WriteCidTo(intptr_t cid, BaseTextBuffer *buffer)
Definition: il_test.cc:384
static constexpr Representation kUnboxedIntPtr
Definition: locations.h:176
constexpr int16_t kMinInt16
Definition: globals.h:479
TestIntVariant
Definition: il_test.cc:1563
static bool ExpectRangesContainCid(const Expect &expect, const CidRangeVector &ranges, intptr_t expected)
Definition: il_test.cc:456
const char *const function_name
void TestStaticFieldForwarding(Thread *thread, const Class &test_cls, const Field &field, intptr_t num_stores, bool expected_to_forward)
Definition: il_test.cc:1424
InstructionsPtr BuildInstructions(std::function< void(compiler::Assembler *assembler)> fun)
const RegList kAbiVolatileFpuRegs
static constexpr intptr_t kInvalidTryIndex
static const Function & BuildTestFunction(intptr_t num_parameters, std::function< void(FlowGraphBuilderHelper &)> build_graph)
Definition: il_test.cc:1543
static constexpr intptr_t kClassIdTagMax
Definition: class_id.h:22
@ kAlignedAccess
Definition: il.h:6766
FlowGraph * SetupFfiFlowgraph(TestPipeline *pipeline, const compiler::ffi::CallMarshaller &marshaller, uword native_entry, bool is_leaf)
Definition: il_test.cc:865
def call(args)
Definition: dom.py:159
@ kNone
Definition: layer.h:53
DEF_SWITCHES_START aot vmservice shared library name
Definition: switches.h:32
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace buffer
Definition: switches.h:126
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive mode
Definition: switches.h:228
#define Px64
Definition: globals.h:418
#define Pd
Definition: globals.h:408
int compare(const void *untyped_lhs, const void *untyped_rhs)
Definition: skdiff.h:161
static SkString join(const CommandLineFlags::StringArray &)
Definition: skpbench.cpp:741
Definition: SkMD5.cpp:130
#define ISOLATE_UNIT_TEST_CASE(name)
Definition: unit_test.h:64