il_test.cc
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/backend/il.h"

#include <vector>

#include "platform/text_buffer.h"
#include "platform/utils.h"
#include "vm/class_id.h"
#include "vm/compiler/backend/block_builder.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/backend/il_test_helper.h"
#include "vm/compiler/backend/type_propagator.h"
#include "vm/compiler/compiler_pass.h"
#include "vm/unit_test.h"

namespace dart {

ISOLATE_UNIT_TEST_CASE(InstructionTests) {
  TargetEntryInstr* target_instr =
      new TargetEntryInstr(1, kInvalidTryIndex, DeoptId::kNone);
  EXPECT(target_instr->IsBlockEntry());
  EXPECT(!target_instr->IsDefinition());
}

ISOLATE_UNIT_TEST_CASE(OptimizationTests) {
  JoinEntryInstr* join =
      new JoinEntryInstr(1, kInvalidTryIndex, DeoptId::kNone);

  Definition* def1 = new PhiInstr(join, 0);
  Definition* def2 = new PhiInstr(join, 0);
  Value* use1a = new Value(def1);
  Value* use1b = new Value(def1);
  EXPECT(use1a->Equals(*use1b));
  Value* use2 = new Value(def2);
  EXPECT(!use2->Equals(*use1a));

  ConstantInstr* c1 = new ConstantInstr(Bool::True());
  ConstantInstr* c2 = new ConstantInstr(Bool::True());
  EXPECT(c1->Equals(*c2));
  ConstantInstr* c3 = new ConstantInstr(Object::ZoneHandle());
  ConstantInstr* c4 = new ConstantInstr(Object::ZoneHandle());
  EXPECT(c3->Equals(*c4));
  EXPECT(!c3->Equals(*c1));
}

ISOLATE_UNIT_TEST_CASE(IRTest_EliminateWriteBarrier) {
  const char* nullable_tag = TestCase::NullableTag();
  // clang-format off
  auto kScript = Utils::CStringUniquePtr(OS::SCreate(nullptr, R"(
      class Container<T> {
        operator []=(var index, var value) {
          return data[index] = value;
        }

        List<T%s> data = List<T%s>.filled(10, null);
      }

      Container<int> x = Container<int>();

      foo() {
        for (int i = 0; i < 10; ++i) {
          x[i] = i;
        }
      }
      )", nullable_tag, nullable_tag), std::free);
  // clang-format on

  const auto& root_library = Library::Handle(LoadTestScript(kScript.get()));
  const auto& function = Function::Handle(GetFunction(root_library, "foo"));

  Invoke(root_library, "foo");

  TestPipeline pipeline(function, CompilerPass::kJIT);
  FlowGraph* flow_graph = pipeline.RunPasses({});

  auto entry = flow_graph->graph_entry()->normal_entry();
  EXPECT(entry != nullptr);

  StoreIndexedInstr* store_indexed = nullptr;

  ILMatcher cursor(flow_graph, entry, true);
  RELEASE_ASSERT(cursor.TryMatch({
      kMoveGlob,
      kMatchAndMoveBranchTrue,
      kMoveGlob,
      {kMatchStoreIndexed, &store_indexed},
  }));

  EXPECT(!store_indexed->value()->NeedsWriteBarrier());
}

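// Walks every block in reverse postorder and checks the slot name of each
// StoreField instruction against |expected_stores|, in order. For example, a
// constructor body that only initializes a field `f` should produce exactly
// one StoreField and thus the store list {"f"}.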
static void ExpectStores(FlowGraph* flow_graph,
                         const std::vector<const char*>& expected_stores) {
  size_t next_expected_store = 0;
  for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    for (ForwardInstructionIterator it(block_it.Current()); !it.Done();
         it.Advance()) {
      if (auto store = it.Current()->AsStoreField()) {
        EXPECT_LT(next_expected_store, expected_stores.size());
        EXPECT_STREQ(expected_stores[next_expected_store],
                     store->slot().Name());
        next_expected_store++;
      }
    }
  }
}

static void RunInitializingStoresTest(
    const Library& root_library,
    const char* function_name,
    CompilerPass::PipelineMode mode,
    const std::vector<const char*>& expected_stores) {
  const auto& function =
      Function::Handle(GetFunction(root_library, function_name));
  TestPipeline pipeline(function, mode);
  FlowGraph* flow_graph = pipeline.RunPasses({
      CompilerPass::kComputeSSA,
      CompilerPass::kTypePropagation,
      CompilerPass::kApplyICData,
      CompilerPass::kInlining,
      CompilerPass::kTypePropagation,
      CompilerPass::kSelectRepresentations,
      CompilerPass::kCanonicalize,
      CompilerPass::kConstantPropagation,
  });
  ASSERT(flow_graph != nullptr);
  ExpectStores(flow_graph, expected_stores);
}

ISOLATE_UNIT_TEST_CASE(IRTest_InitializingStores) {
  // clang-format off
  auto kScript = Utils::CStringUniquePtr(OS::SCreate(nullptr, R"(
      class Bar {
        var f;
        var g;

        Bar({this.f, this.g});
      }
      Bar f1() => Bar(f: 10);
      Bar f2() => Bar(g: 10);
      f3() {
        return () { };
      }
      f4<T>({T%s value}) {
        return () { return value; };
      }
      main() {
        f1();
        f2();
        f3();
        f4();
      }
      )",
      TestCase::NullableTag()), std::free);
  // clang-format on

  const auto& root_library = Library::Handle(LoadTestScript(kScript.get()));
  Invoke(root_library, "main");

  RunInitializingStoresTest(root_library, "f1", CompilerPass::kJIT,
                            /*expected_stores=*/{"f"});
  RunInitializingStoresTest(root_library, "f2", CompilerPass::kJIT,
                            /*expected_stores=*/{"g"});
  RunInitializingStoresTest(root_library, "f3", CompilerPass::kJIT,
                            /*expected_stores=*/
                            {"Closure.function", "Closure.entry_point"});

  // Note that in JIT mode we lower context allocation in a way that hinders
  // removal of initializing moves, so some redundant stores of null are left
  // in the graph. In AOT mode we don't apply this lowering, which enables us
  // to remove more stores.
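  // This difference is visible in the f4 expectations below: the JIT list
  // still contains the Context.parent stores, while the AOT list does not.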
  std::vector<const char*> expected_stores_jit;
  std::vector<const char*> expected_stores_aot;

  expected_stores_jit.insert(
      expected_stores_jit.end(),
      {"value", "Context.parent", "Context.parent", "value",
       "Closure.function_type_arguments", "Closure.context"});
  expected_stores_aot.insert(
      expected_stores_aot.end(),
      {"value", "Closure.function_type_arguments", "Closure.context"});

  RunInitializingStoresTest(root_library, "f4", CompilerPass::kJIT,
                            expected_stores_jit);
  RunInitializingStoresTest(root_library, "f4", CompilerPass::kAOT,
                            expected_stores_aot);
}

// Returns |true| if the compiler canonicalizes away a chain of IntConverters
// going from |initial| representation to |intermediate| representation and
// then to |final| representation, given that the initial value has range
// [min_value, max_value]. The chain can only be removed entirely (leaving the
// original definition) when |final| equals |initial|.
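// For example, when v0 is an int64 whose range fits in [kMinInt16, kMaxInt16],
// the chain
//   v1 <- IntConverter(int64->int32, v0)
//   v2 <- IntConverter(int32->int64, v1)
// loses no information, so canonicalization replaces all uses of v2 with v0.
// If the range may not fit in the intermediate representation (e.g. an int64
// up to kMaxInt32 + 1 truncated to int32), the converters must be kept.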
bool TestIntConverterCanonicalizationRule(Thread* thread,
                                          int64_t min_value,
                                          int64_t max_value,
                                          Representation initial,
                                          Representation intermediate,
                                          Representation final) {
  using compiler::BlockBuilder;

  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);

  FlowGraphBuilderHelper H(/*num_parameters=*/1);
  H.AddVariable("v0", AbstractType::ZoneHandle(Type::IntType()));

  auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();

  Definition* v0;
  DartReturnInstr* ret;

  {
    BlockBuilder builder(H.flow_graph(), normal_entry);
    v0 = builder.AddParameter(0, initial);
    v0->set_range(Range(RangeBoundary::FromConstant(min_value),
                        RangeBoundary::FromConstant(max_value)));
    auto conv1 = builder.AddDefinition(new IntConverterInstr(
        initial, intermediate, new Value(v0), S.GetNextDeoptId()));
    auto conv2 = builder.AddDefinition(new IntConverterInstr(
        intermediate, final, new Value(conv1), S.GetNextDeoptId()));
    ret = builder.AddReturn(new Value(conv2));
  }

  H.FinishGraph();

  H.flow_graph()->Canonicalize();
  H.flow_graph()->Canonicalize();

  return ret->value()->definition() == v0;
}

ISOLATE_UNIT_TEST_CASE(IL_IntConverterCanonicalization) {
  EXPECT(TestIntConverterCanonicalizationRule(thread, kMinInt16, kMaxInt16,
                                              kUnboxedInt64, kUnboxedInt32,
                                              kUnboxedInt64));
  EXPECT(TestIntConverterCanonicalizationRule(thread, kMinInt32, kMaxInt32,
                                              kUnboxedInt64, kUnboxedInt32,
                                              kUnboxedInt64));
  EXPECT(!TestIntConverterCanonicalizationRule(
      thread, kMinInt32, static_cast<int64_t>(kMaxInt32) + 1, kUnboxedInt64,
      kUnboxedInt32, kUnboxedInt64));
  EXPECT(TestIntConverterCanonicalizationRule(
      thread, 0, kMaxInt16, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));
  EXPECT(TestIntConverterCanonicalizationRule(
      thread, 0, kMaxInt32, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));
  EXPECT(TestIntConverterCanonicalizationRule(
      thread, 0, kMaxUint32, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));
  EXPECT(!TestIntConverterCanonicalizationRule(
      thread, 0, static_cast<int64_t>(kMaxUint32) + 1, kUnboxedInt64,
      kUnboxedUint32, kUnboxedInt64));
  EXPECT(!TestIntConverterCanonicalizationRule(
      thread, -1, kMaxInt16, kUnboxedInt64, kUnboxedUint32, kUnboxedInt64));

  // Regression test for https://dartbug.com/53613.
  EXPECT(!TestIntConverterCanonicalizationRule(thread, kMinInt32, kMaxInt32,
                                               kUnboxedInt32, kUnboxedUint32,
                                               kUnboxedInt64));
  EXPECT(TestIntConverterCanonicalizationRule(thread, kMinInt32, kMaxInt32,
                                              kUnboxedInt32, kUnboxedUint32,
                                              kUnboxedInt32));
  EXPECT(!TestIntConverterCanonicalizationRule(
      thread, 0, kMaxInt32, kUnboxedInt32, kUnboxedUint32, kUnboxedInt64));
  EXPECT(TestIntConverterCanonicalizationRule(
      thread, 0, kMaxInt32, kUnboxedInt32, kUnboxedUint32, kUnboxedInt32));
}

ISOLATE_UNIT_TEST_CASE(IL_PhiCanonicalization) {
  using compiler::BlockBuilder;

  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);

  FlowGraphBuilderHelper H(/*num_parameters=*/1);
  H.AddVariable("v0", AbstractType::ZoneHandle(Type::DynamicType()));

  auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
  auto b2 = H.JoinEntry();
  auto b3 = H.TargetEntry();
  auto b4 = H.TargetEntry();

  Definition* v0;
  DartReturnInstr* ret;
  PhiInstr* phi;

  {
    BlockBuilder builder(H.flow_graph(), normal_entry);
    v0 = builder.AddParameter(0, kTagged);
    builder.AddInstruction(new GotoInstr(b2, S.GetNextDeoptId()));
  }

  {
    BlockBuilder builder(H.flow_graph(), b2);
    phi = new PhiInstr(b2, 2);
    phi->SetInputAt(0, new Value(v0));
    phi->SetInputAt(1, new Value(phi));
    builder.AddPhi(phi);
    builder.AddBranch(new StrictCompareInstr(
                          InstructionSource(), Token::kEQ_STRICT,
                          new Value(H.IntConstant(1)), new Value(phi),
                          /*needs_number_check=*/false, S.GetNextDeoptId()),
                      b3, b4);
  }

  {
    BlockBuilder builder(H.flow_graph(), b3);
    builder.AddInstruction(new GotoInstr(b2, S.GetNextDeoptId()));
  }

  {
    BlockBuilder builder(H.flow_graph(), b4);
    ret = builder.AddReturn(new Value(phi));
  }

  H.FinishGraph();

  H.flow_graph()->Canonicalize();

  EXPECT(ret->value()->definition() == v0);
}

// Regression test for issue 46018.
ISOLATE_UNIT_TEST_CASE(IL_UnboxIntegerCanonicalization) {
  using compiler::BlockBuilder;

  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);

  FlowGraphBuilderHelper H(/*num_parameters=*/2);
  H.AddVariable("v0", AbstractType::ZoneHandle(Type::DynamicType()));
  H.AddVariable("v1", AbstractType::ZoneHandle(Type::DynamicType()));

  auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();
  Definition* unbox;

  {
    BlockBuilder builder(H.flow_graph(), normal_entry);
    Definition* index = H.IntConstant(0);
    Definition* int_type =
        H.flow_graph()->GetConstant(Type::Handle(Type::IntType()));

    Definition* float64_array = builder.AddParameter(0, kTagged);
    Definition* int64_array = builder.AddParameter(1, kTagged);

    Definition* load_indexed = builder.AddDefinition(new LoadIndexedInstr(
        new Value(float64_array), new Value(index),
        /* index_unboxed */ false,
        /* index_scale */ 8, kTypedDataFloat64ArrayCid, kAlignedAccess,
        S.GetNextDeoptId(), InstructionSource()));
    Definition* box = builder.AddDefinition(
        BoxInstr::Create(kUnboxedDouble, new Value(load_indexed)));
    Definition* cast = builder.AddDefinition(new AssertAssignableInstr(
        InstructionSource(), new Value(box), new Value(int_type),
        /* instantiator_type_arguments */
        new Value(H.flow_graph()->constant_null()),
        /* function_type_arguments */
        new Value(H.flow_graph()->constant_null()),
        /* dst_name */ String::Handle(String::New("not-null")),
        S.GetNextDeoptId()));
    unbox = builder.AddDefinition(new UnboxInt64Instr(
        new Value(cast), S.GetNextDeoptId(), BoxInstr::kGuardInputs));

    builder.AddInstruction(new StoreIndexedInstr(
        new Value(int64_array), new Value(index), new Value(unbox),
        kNoStoreBarrier,
        /* index_unboxed */ false,
        /* index_scale */ 8, kTypedDataInt64ArrayCid, kAlignedAccess,
        S.GetNextDeoptId(), InstructionSource()));
    builder.AddReturn(new Value(index));
  }

  H.FinishGraph();

  FlowGraphTypePropagator::Propagate(H.flow_graph());
  EXPECT(!unbox->ComputeCanDeoptimize());

  H.flow_graph()->Canonicalize();
  EXPECT(!unbox->ComputeCanDeoptimize());

  H.flow_graph()->RemoveRedefinitions();
  EXPECT(!unbox->ComputeCanDeoptimize());  // Previously this reverted to true.
}

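// Writes |cid| optionally followed by the class name and abstractness, e.g.
// "42 (Foo)" or "43 (Bar, abstract)" (the numeric values here are
// illustrative; actual cids vary between VM builds).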
static void WriteCidTo(intptr_t cid, BaseTextBuffer* buffer) {
  ClassTable* const class_table = IsolateGroup::Current()->class_table();
  buffer->Printf("%" Pd "", cid);
  if (class_table->HasValidClassAt(cid)) {
    const auto& cls = Class::Handle(class_table->At(cid));
    buffer->Printf(" (%s", cls.ScrubbedNameCString());
    if (cls.is_abstract()) {
      buffer->AddString(", abstract");
    }
    buffer->AddString(")");
  }
}

static void TestNullAwareEqualityCompareCanonicalization(
    Thread* thread,
    bool allow_representation_change) {
  using compiler::BlockBuilder;

  CompilerState S(thread, /*is_aot=*/true, /*is_optimizing=*/true);

  FlowGraphBuilderHelper H(/*num_parameters=*/2);
  H.AddVariable("v0", AbstractType::ZoneHandle(Type::IntType()));
  H.AddVariable("v1", AbstractType::ZoneHandle(Type::IntType()));

  auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();

  EqualityCompareInstr* compare = nullptr;
  {
    BlockBuilder builder(H.flow_graph(), normal_entry);
    Definition* v0 = builder.AddParameter(0, kUnboxedInt64);
    Definition* v1 = builder.AddParameter(1, kUnboxedInt64);
    Definition* box0 = builder.AddDefinition(new BoxInt64Instr(new Value(v0)));
    Definition* box1 = builder.AddDefinition(new BoxInt64Instr(new Value(v1)));

    compare = builder.AddDefinition(new EqualityCompareInstr(
        InstructionSource(), Token::kEQ, new Value(box0), new Value(box1),
        kMintCid, S.GetNextDeoptId(), /*null_aware=*/true));
    builder.AddReturn(new Value(compare));
  }

  H.FinishGraph();

  if (!allow_representation_change) {
    H.flow_graph()->disallow_unmatched_representations();
  }

  H.flow_graph()->Canonicalize();

  EXPECT(compare->is_null_aware() == !allow_representation_change);
}

ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_EqualityCompare) {
  TestNullAwareEqualityCompareCanonicalization(thread, true);
  TestNullAwareEqualityCompareCanonicalization(thread, false);
}

static void WriteCidRangeVectorTo(const CidRangeVector& ranges,
                                  BaseTextBuffer* buffer) {
  if (ranges.is_empty()) {
    buffer->AddString("empty CidRangeVector");
    return;
  }
  buffer->AddString("non-empty CidRangeVector:\n");
  for (const auto& range : ranges) {
    for (intptr_t cid = range.cid_start; cid <= range.cid_end; cid++) {
      buffer->AddString("  * ");
      WriteCidTo(cid, buffer);
      buffer->AddString("\n");
    }
  }
}

static bool ExpectRangesContainCid(const Expect& expect,
                                   const CidRangeVector& ranges,
                                   intptr_t expected) {
  for (const auto& range : ranges) {
    for (intptr_t cid = range.cid_start; cid <= range.cid_end; cid++) {
      if (expected == cid) return true;
    }
  }
  TextBuffer buffer(128);
  buffer.AddString("Expected CidRangeVector to include cid ");
  WriteCidTo(expected, &buffer);
  expect.Fail("%s", buffer.buffer());
  return false;
}

static void RangesContainExpectedCids(const Expect& expect,
                                      const CidRangeVector& ranges,
                                      const GrowableArray<intptr_t>& expected) {
  ASSERT(!ranges.is_empty());
  ASSERT(!expected.is_empty());
  {
    TextBuffer buffer(128);
    buffer.AddString("Checking that ");
    WriteCidRangeVectorTo(ranges, &buffer);
    buffer.AddString("includes cids:\n");
    for (const intptr_t cid : expected) {
      buffer.AddString("  * ");
      WriteCidTo(cid, &buffer);
      buffer.AddString("\n");
    }
    THR_Print("%s", buffer.buffer());
  }
  bool all_found = true;
  for (const intptr_t cid : expected) {
    if (!ExpectRangesContainCid(expect, ranges, cid)) {
      all_found = false;
    }
  }
  if (all_found) {
    THR_Print("All expected cids included.\n\n");
  }
}

#define RANGES_CONTAIN_EXPECTED_CIDS(ranges, cids)                             \
  RangesContainExpectedCids(dart::Expect(__FILE__, __LINE__), ranges, cids)

ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Object_Subtype) {
  HierarchyInfo hi(thread);
  const auto& type =
      Type::Handle(IsolateGroup::Current()->object_store()->object_type());
  const bool is_nullable = Instance::NullIsAssignableTo(type);
  EXPECT(hi.CanUseSubtypeRangeCheckFor(type));
  const auto& cls = Class::Handle(type.type_class());

  ClassTable* const class_table = thread->isolate_group()->class_table();
  const intptr_t num_cids = class_table->NumCids();
  auto& to_check = Class::Handle(thread->zone());
  auto& rare_type = AbstractType::Handle(thread->zone());

  GrowableArray<intptr_t> expected_concrete_cids;
  GrowableArray<intptr_t> expected_abstract_cids;
  for (intptr_t cid = kInstanceCid; cid < num_cids; cid++) {
    if (!class_table->HasValidClassAt(cid)) continue;
    if (cid == kNullCid) continue;
    if (cid == kNeverCid) continue;
    if (cid == kDynamicCid && !is_nullable) continue;
    if (cid == kVoidCid && !is_nullable) continue;
    to_check = class_table->At(cid);
    // Only add concrete classes.
    if (to_check.is_abstract()) {
      expected_abstract_cids.Add(cid);
    } else {
      expected_concrete_cids.Add(cid);
    }
    if (cid != kTypeArgumentsCid) {  // Cannot call RareType() on this.
      rare_type = to_check.RareType();
      EXPECT(rare_type.IsSubtypeOf(type, Heap::kNew));
    }
  }

  const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/false, /*exclude_null=*/!is_nullable);
  RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);

  GrowableArray<intptr_t> expected_cids;
  expected_cids.AddArray(expected_concrete_cids);
  expected_cids.AddArray(expected_abstract_cids);
  const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/true, /*exclude_null=*/!is_nullable);
  RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
}

ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Function_Subtype) {
  HierarchyInfo hi(thread);
  const auto& type =
      Type::Handle(IsolateGroup::Current()->object_store()->function_type());
  EXPECT(hi.CanUseSubtypeRangeCheckFor(type));
  const auto& cls = Class::Handle(type.type_class());

  GrowableArray<intptr_t> expected_concrete_cids;
  expected_concrete_cids.Add(kClosureCid);

  GrowableArray<intptr_t> expected_abstract_cids;
  expected_abstract_cids.Add(type.type_class_id());

  const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/false, /*exclude_null=*/true);
  RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);

  GrowableArray<intptr_t> expected_cids;
  expected_cids.AddArray(expected_concrete_cids);
  expected_cids.AddArray(expected_abstract_cids);
  const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/true, /*exclude_null=*/true);
  RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
}

ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Num_Subtype) {
  HierarchyInfo hi(thread);
  const auto& num_type = Type::Handle(Type::Number());
  const auto& int_type = Type::Handle(Type::IntType());
  const auto& double_type = Type::Handle(Type::Double());
  EXPECT(hi.CanUseSubtypeRangeCheckFor(num_type));
  const auto& cls = Class::Handle(num_type.type_class());

  GrowableArray<intptr_t> expected_concrete_cids;
  expected_concrete_cids.Add(kSmiCid);
  expected_concrete_cids.Add(kMintCid);
  expected_concrete_cids.Add(kDoubleCid);

  GrowableArray<intptr_t> expected_abstract_cids;
  expected_abstract_cids.Add(num_type.type_class_id());
  expected_abstract_cids.Add(int_type.type_class_id());
  expected_abstract_cids.Add(double_type.type_class_id());

  const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/false, /*exclude_null=*/true);
  RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);

  GrowableArray<intptr_t> expected_cids;
  expected_cids.AddArray(expected_concrete_cids);
  expected_cids.AddArray(expected_abstract_cids);
  const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/true, /*exclude_null=*/true);
  RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
}

ISOLATE_UNIT_TEST_CASE(HierarchyInfo_Int_Subtype) {
  HierarchyInfo hi(thread);
  const auto& type = Type::Handle(Type::IntType());
  EXPECT(hi.CanUseSubtypeRangeCheckFor(type));
  const auto& cls = Class::Handle(type.type_class());

  GrowableArray<intptr_t> expected_concrete_cids;
  expected_concrete_cids.Add(kSmiCid);
  expected_concrete_cids.Add(kMintCid);

  GrowableArray<intptr_t> expected_abstract_cids;
  expected_abstract_cids.Add(type.type_class_id());

  const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/false, /*exclude_null=*/true);
  RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);

  GrowableArray<intptr_t> expected_cids;
  expected_cids.AddArray(expected_concrete_cids);
  expected_cids.AddArray(expected_abstract_cids);
  const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/true, /*exclude_null=*/true);
  RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
}

ISOLATE_UNIT_TEST_CASE(HierarchyInfo_String_Subtype) {
  HierarchyInfo hi(thread);
  const auto& type = Type::Handle(Type::StringType());
  EXPECT(hi.CanUseSubtypeRangeCheckFor(type));
  const auto& cls = Class::Handle(type.type_class());

  GrowableArray<intptr_t> expected_concrete_cids;
  expected_concrete_cids.Add(kOneByteStringCid);
  expected_concrete_cids.Add(kTwoByteStringCid);

  GrowableArray<intptr_t> expected_abstract_cids;
  expected_abstract_cids.Add(type.type_class_id());

  const CidRangeVector& concrete_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/false, /*exclude_null=*/true);
  THR_Print("Checking concrete subtype ranges for String\n");
  RANGES_CONTAIN_EXPECTED_CIDS(concrete_range, expected_concrete_cids);

  GrowableArray<intptr_t> expected_cids;
  expected_cids.AddArray(expected_concrete_cids);
  expected_cids.AddArray(expected_abstract_cids);
  const CidRangeVector& abstract_range = hi.SubtypeRangesForClass(
      cls, /*include_abstract=*/true, /*exclude_null=*/true);
  THR_Print("Checking concrete and abstract subtype ranges for String\n");
  RANGES_CONTAIN_EXPECTED_CIDS(abstract_range, expected_cids);
}

// This test verifies that a double == Smi comparison is recognized and
// implemented using EqualityCompare.
// Regression test for https://github.com/dart-lang/sdk/issues/47031.
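// In other words, the body of `foo` below should compile to a BinaryDoubleOp
// feeding an EqualityCompare, with no call instruction in between, as the IL
// matcher in the test asserts.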
ISOLATE_UNIT_TEST_CASE(IRTest_DoubleEqualsSmi) {
  const char* kScript = R"(
    bool foo(double x) => (x + 0.5) == 0;
    main() {
      foo(-0.5);
    }
  )";

  const auto& root_library = Library::Handle(LoadTestScript(kScript));
  const auto& function = Function::Handle(GetFunction(root_library, "foo"));

  TestPipeline pipeline(function, CompilerPass::kAOT);
  FlowGraph* flow_graph = pipeline.RunPasses({});

  auto entry = flow_graph->graph_entry()->normal_entry();
  ILMatcher cursor(flow_graph, entry, /*trace=*/true,
                   ParallelMovesHandling::kSkip);

  RELEASE_ASSERT(cursor.TryMatch({
      kMoveGlob,
      kMatchAndMoveBinaryDoubleOp,
      kMatchAndMoveEqualityCompare,
      kMatchDartReturn,
  }));
}

ISOLATE_UNIT_TEST_CASE(IRTest_LoadThread) {
  // clang-format off
  auto kScript = R"(
      import 'dart:ffi';

      int myFunction() {
        return 100;
      }

      void anotherFunction() {}
  )";
  // clang-format on

  const auto& root_library = Library::Handle(LoadTestScript(kScript));
  Zone* const zone = Thread::Current()->zone();
  auto& invoke_result = Instance::Handle(zone);
  invoke_result ^= Invoke(root_library, "myFunction");
  EXPECT_EQ(Smi::New(100), invoke_result.ptr());

  const auto& my_function =
      Function::Handle(GetFunction(root_library, "myFunction"));

  TestPipeline pipeline(my_function, CompilerPass::kJIT);
  FlowGraph* flow_graph = pipeline.RunPasses({
      CompilerPass::kComputeSSA,
  });

  DartReturnInstr* return_instr = nullptr;
  {
    ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());

    EXPECT(cursor.TryMatch({
        kMoveGlob,
        {kMatchDartReturn, &return_instr},
    }));
  }

  auto* const load_thread_instr = new (zone) LoadThreadInstr();
  flow_graph->InsertBefore(return_instr, load_thread_instr, nullptr,
                           FlowGraph::kValue);
  auto load_thread_value = Value(load_thread_instr);

  auto* const convert_instr = new (zone) IntConverterInstr(
      kUntagged, kUnboxedAddress, &load_thread_value, DeoptId::kNone);
  flow_graph->InsertBefore(return_instr, convert_instr, nullptr,
                           FlowGraph::kValue);
  auto convert_value = Value(convert_instr);

  auto* const box_instr = BoxInstr::Create(kUnboxedAddress, &convert_value);
  flow_graph->InsertBefore(return_instr, box_instr, nullptr, FlowGraph::kValue);

  return_instr->InputAt(0)->definition()->ReplaceUsesWith(box_instr);

  {
    // Check we constructed the right graph.
    ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());
    EXPECT(cursor.TryMatch({
        kMoveGlob,
        kMatchAndMoveLoadThread,
        kMatchAndMoveIntConverter,
        kMatchAndMoveBox,
        kMatchDartReturn,
    }));
  }

  pipeline.RunForcedOptimizedAfterSSAPasses();

  {
#if !defined(PRODUCT) && !defined(USING_THREAD_SANITIZER)
    SetFlagScope<bool> sfs(&FLAG_disassemble_optimized, true);
#endif
    pipeline.CompileGraphAndAttachFunction();
  }

  // Ensure we can successfully invoke the function.
  invoke_result ^= Invoke(root_library, "myFunction");
  intptr_t result_int = Integer::Cast(invoke_result).AsInt64Value();
  EXPECT_EQ(reinterpret_cast<intptr_t>(thread), result_int);
}

#if !defined(TARGET_ARCH_IA32)
ISOLATE_UNIT_TEST_CASE(IRTest_CachableIdempotentCall) {
  // clang-format off
  auto kScript = Utils::CStringUniquePtr(OS::SCreate(nullptr, R"(
      int globalCounter = 0;

      int increment() => ++globalCounter;

      int cachedIncrement() {
        // We will replace this call with a cacheable call,
        // which will lead to the counter no longer being incremented.
        // Make sure to return the value, so we can see that the boxing and
        // unboxing works as expected.
        return increment();
      }

      int multipleIncrement() {
        int returnValue = 0;
        for(int i = 0; i < 10; i++) {
          // Save the last returned value.
          returnValue = cachedIncrement();
        }
        return returnValue;
      }
      )"), std::free);
  // clang-format on

  const auto& root_library = Library::Handle(LoadTestScript(kScript.get()));
  const auto& first_result =
      Object::Handle(Invoke(root_library, "multipleIncrement"));
  EXPECT(first_result.IsSmi());
  if (first_result.IsSmi()) {
    const intptr_t int_value = Smi::Cast(first_result).Value();
    EXPECT_EQ(10, int_value);
  }

  const auto& cached_increment_function =
      Function::Handle(GetFunction(root_library, "cachedIncrement"));

  const auto& increment_function =
      Function::ZoneHandle(GetFunction(root_library, "increment"));

  TestPipeline pipeline(cached_increment_function, CompilerPass::kJIT);
  FlowGraph* flow_graph = pipeline.RunPasses({
      CompilerPass::kComputeSSA,
  });

  StaticCallInstr* static_call = nullptr;
  {
    ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());

    EXPECT(cursor.TryMatch({
        kMoveGlob,
        {kMatchAndMoveStaticCall, &static_call},
        kMoveGlob,
        kMatchDartReturn,
    }));
  }

  InputsArray args;
  CachableIdempotentCallInstr* call = new CachableIdempotentCallInstr(
      InstructionSource(), kUnboxedAddress, increment_function,
      static_call->type_args_len(), Array::empty_array(), std::move(args),
      DeoptId::kNone);
  static_call->ReplaceWith(call, nullptr);

  pipeline.RunForcedOptimizedAfterSSAPasses();

  {
    ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry());

    EXPECT(cursor.TryMatch({
        kMoveGlob,
        kMatchAndMoveCachableIdempotentCall,
        kMoveGlob,
        // The cacheable call returns unboxed, so select representations
        // adds boxing.
        kMatchBox,
        kMoveGlob,
        kMatchDartReturn,
    }));
  }

  {
#if !defined(PRODUCT)
    SetFlagScope<bool> sfs(&FLAG_disassemble_optimized, true);
#endif
    pipeline.CompileGraphAndAttachFunction();
  }

  const auto& second_result =
      Object::Handle(Invoke(root_library, "multipleIncrement"));
  EXPECT(second_result.IsSmi());
  if (second_result.IsSmi()) {
    const intptr_t int_value = Smi::Cast(second_result).Value();
    EXPECT_EQ(11, int_value);
  }
}
#endif

// Helper to set up an inlined FfiCall by replacing a StaticCall.
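// Schematically, the graph rewrite performed below is:
//   v <- StaticCall(placeholder)          // located via ILMatcher
// becomes
//   addr <- IntConverter(intptr->untagged, Constant(native_entry))
//   FfiCall(marshaller, addr)             // inserted before the call
// after which the placeholder StaticCall is removed from the graph.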
FlowGraph* SetupFfiFlowgraph(TestPipeline* pipeline,
                             const compiler::ffi::CallMarshaller& marshaller,
                             uword native_entry,
                             bool is_leaf) {
  FlowGraph* flow_graph = pipeline->RunPasses({CompilerPass::kComputeSSA});

  {
    // Locate the placeholder call.
    StaticCallInstr* static_call = nullptr;
    {
      ILMatcher cursor(flow_graph, flow_graph->graph_entry()->normal_entry(),
                       /*trace=*/false);
      cursor.TryMatch({kMoveGlob, {kMatchStaticCall, &static_call}});
    }
    RELEASE_ASSERT(static_call != nullptr);

    // Store the native entry as an unboxed constant and convert it to an
    // untagged pointer for the FfiCall.
    Zone* const Z = flow_graph->zone();
    auto* const load_entry_point = new (Z) IntConverterInstr(
        kUnboxedIntPtr, kUntagged,
        new (Z) Value(flow_graph->GetConstant(
            Integer::Handle(Z, Integer::NewCanonical(native_entry)),
            kUnboxedIntPtr)),
        DeoptId::kNone);
    flow_graph->InsertBefore(static_call, load_entry_point, /*env=*/nullptr,
                             FlowGraph::kValue);

    // Make an FfiCall based on ffi_trampoline that calls our native function.
    const intptr_t num_arguments =
        FfiCallInstr::InputCountForMarshaller(marshaller);
    RELEASE_ASSERT(num_arguments == 1);
    InputsArray arguments(num_arguments);
    arguments.Add(new (Z) Value(load_entry_point));
    auto* const ffi_call = new (Z)
        FfiCallInstr(DeoptId::kNone, marshaller, is_leaf, std::move(arguments));
    RELEASE_ASSERT(
        ffi_call->InputAt(ffi_call->TargetAddressIndex())->definition() ==
        load_entry_point);
    flow_graph->InsertBefore(static_call, ffi_call, /*env=*/nullptr,
                             FlowGraph::kEffect);

    // Remove the placeholder call.
    static_call->RemoveFromGraph(/*return_previous=*/false);
  }

  // Run remaining relevant compiler passes.
  pipeline->RunAdditionalPasses({
      CompilerPass::kApplyICData,
      CompilerPass::kTryOptimizePatterns,
      CompilerPass::kSetOuterInliningId,
      CompilerPass::kTypePropagation,
      // Skipping passes that don't seem to do anything for this test.
      CompilerPass::kWidenSmiToInt32,
      CompilerPass::kSelectRepresentations,
      // Skipping passes that don't seem to do anything for this test.
      CompilerPass::kTypePropagation,
      CompilerPass::kRangeAnalysis,
      // Skipping passes that don't seem to do anything for this test.
      CompilerPass::kFinalizeGraph,
      CompilerPass::kCanonicalize,
      CompilerPass::kAllocateRegisters,
      CompilerPass::kReorderBlocks,
  });

  return flow_graph;
}

// Test that FFI calls spill all live values to the stack, and that FFI leaf
// calls are free to use available ABI callee-save registers to avoid
// spilling. Additionally, test that register allocation is done correctly by
// clobbering all volatile registers in the native function being called.
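// Concretely: across a non-leaf FfiCall, a live value held in a volatile
// register has to be moved into a stack slot, while across a leaf FfiCall the
// register allocator may keep it in an ABI callee-save register, so the leaf
// variant is expected to produce fewer register-to-stack moves.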
ISOLATE_UNIT_TEST_CASE(IRTest_FfiCallInstrLeafDoesntSpill) {
  const char* kScript = R"(
    import 'dart:ffi';

    // This is purely a placeholder and is never called.
    void placeholder() {}

    // Will call the "doFfiCall" and exercise its code.
    bool invokeDoFfiCall() {
      final double result = doFfiCall(1, 2, 3, 1.0, 2.0, 3.0);
      if (result != (2 + 3 + 4 + 2.0 + 3.0 + 4.0)) {
        throw 'Failed. Result was $result.';
      }
      return true;
    }

    // Will perform a "C" call while having live values in registers
    // across the FfiCall.
    double doFfiCall(int a, int b, int c, double x, double y, double z) {
      // Ensure there is at least one live value in a register.
      a += 1;
      b += 1;
      c += 1;
      x += 1.0;
      y += 1.0;
      z += 1.0;
      // We'll replace this StaticCall with an FfiCall.
      placeholder();
      // Use the live values.
      return (a + b + c + x + y + z);
    }

    // FFI trampoline function.
    typedef NT = Void Function();
    typedef DT = void Function();
    Pointer<NativeFunction<NT>> ptr = Pointer.fromAddress(0);
    DT getFfiTrampolineClosure() => ptr.asFunction(isLeaf:true);
  )";

  const auto& root_library = Library::Handle(LoadTestScript(kScript));

  // Build a "C" function that we can actually invoke.
  auto& c_function = Instructions::Handle(
      BuildInstructions([](compiler::Assembler* assembler) {
        // Clobber all volatile registers to make sure the caller doesn't rely
        // on any non-callee-save register.
        for (intptr_t reg = 0; reg < kNumberOfFpuRegisters; reg++) {
          if ((kAbiVolatileFpuRegs & (1 << reg)) != 0) {
#if defined(TARGET_ARCH_ARM)
            // On ARM we need an extra scratch register for LoadDImmediate.
            assembler->LoadDImmediate(static_cast<DRegister>(reg), 0.0, R3);
#else
            assembler->LoadDImmediate(static_cast<FpuRegister>(reg), 0.0);
#endif
          }
        }
        for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
          if ((kDartVolatileCpuRegs & (1 << reg)) != 0) {
            assembler->LoadImmediate(static_cast<Register>(reg), 0xDEADBEEF);
          }
        }
        assembler->Ret();
      }));
  uword native_entry = c_function.EntryPoint();

  // Get initial compilation done.
  Invoke(root_library, "invokeDoFfiCall");

  const Function& do_ffi_call =
      Function::Handle(GetFunction(root_library, "doFfiCall"));
  RELEASE_ASSERT(!do_ffi_call.IsNull());

  const auto& value = Closure::Handle(
      Closure::RawCast(Invoke(root_library, "getFfiTrampolineClosure")));
  RELEASE_ASSERT(value.IsClosure());
  const auto& ffi_trampoline =
      Function::ZoneHandle(Closure::Cast(value).function());
  RELEASE_ASSERT(!ffi_trampoline.IsNull());

  // Construct the FFICallInstr from the trampoline matching our native
  // function.
  const char* error = nullptr;
  auto* const zone = thread->zone();
  const auto& c_signature =
      FunctionType::ZoneHandle(zone, ffi_trampoline.FfiCSignature());
  const auto marshaller_ptr = compiler::ffi::CallMarshaller::FromFunction(
      zone, ffi_trampoline, /*function_params_start_at=*/1, c_signature,
      &error);
  RELEASE_ASSERT(error == nullptr);
  RELEASE_ASSERT(marshaller_ptr != nullptr);
  const auto& marshaller = *marshaller_ptr;

  const auto& compile_and_run =
      [&](bool is_leaf, std::function<void(ParallelMoveInstr*)> verify) {
        // Build the SSA graph for "doFfiCall".
        TestPipeline pipeline(do_ffi_call, CompilerPass::kJIT);
        FlowGraph* flow_graph =
            SetupFfiFlowgraph(&pipeline, marshaller, native_entry, is_leaf);

        {
          ParallelMoveInstr* parallel_move = nullptr;
          ILMatcher cursor(flow_graph,
                           flow_graph->graph_entry()->normal_entry(),
                           /*trace=*/false);
          while (cursor.TryMatch(
              {kMoveGlob, {kMatchAndMoveParallelMove, &parallel_move}})) {
            verify(parallel_move);
          }
        }

        // Finish the compilation and attach code so we can run it.
        pipeline.CompileGraphAndAttachFunction();

        // Ensure we can successfully invoke the FFI call.
        auto& result = Object::Handle(Invoke(root_library, "invokeDoFfiCall"));
        RELEASE_ASSERT(result.IsBool());
        EXPECT(Bool::Cast(result).value());
      };

  intptr_t num_cpu_reg_to_stack_nonleaf = 0;
  intptr_t num_cpu_reg_to_stack_leaf = 0;
  intptr_t num_fpu_reg_to_stack_nonleaf = 0;
  intptr_t num_fpu_reg_to_stack_leaf = 0;

  // Test that a non-leaf call spills live values.
  compile_and_run(/*is_leaf=*/false, [&](ParallelMoveInstr* parallel_move) {
    // TargetAddress is passed in a register; live values are all spilled.
    for (int i = 0; i < parallel_move->NumMoves(); i++) {
      auto move = parallel_move->moves()[i];
      if (move->src_slot()->IsRegister() && move->dest_slot()->IsStackSlot()) {
        num_cpu_reg_to_stack_nonleaf++;
      } else if (move->src_slot()->IsFpuRegister() &&
                 move->dest_slot()->IsDoubleStackSlot()) {
        num_fpu_reg_to_stack_nonleaf++;
      }
    }
  });

  // Test that leaf calls do not cause spills of live values.
  compile_and_run(/*is_leaf=*/true, [&](ParallelMoveInstr* parallel_move) {
    // TargetAddress is passed in a register; live values are not spilled and
    // remain in callee-save registers.
    for (int i = 0; i < parallel_move->NumMoves(); i++) {
      auto move = parallel_move->moves()[i];
      if (move->src_slot()->IsRegister() && move->dest_slot()->IsStackSlot()) {
        num_cpu_reg_to_stack_leaf++;
      } else if (move->src_slot()->IsFpuRegister() &&
                 move->dest_slot()->IsDoubleStackSlot()) {
        num_fpu_reg_to_stack_leaf++;
      }
    }
  });

  // We should have fewer moves to the stack (i.e. spilling) in leaf calls.
  EXPECT_LT(num_cpu_reg_to_stack_leaf, num_cpu_reg_to_stack_nonleaf);
  // We don't have volatile FPU registers on all platforms.
  const bool has_callee_save_fpu_regs =
      Utils::CountOneBitsWord(kAbiVolatileFpuRegs) <
      Utils::CountOneBitsWord(kAllFpuRegistersList);
  EXPECT(!has_callee_save_fpu_regs ||
         num_fpu_reg_to_stack_leaf < num_fpu_reg_to_stack_nonleaf);
}

static void TestConstantFoldToSmi(const Library& root_library,
                                  const char* function_name,
                                  CompilerPass::PipelineMode mode,
                                  intptr_t expected_value) {
  const auto& function =
      Function::Handle(GetFunction(root_library, function_name));

  TestPipeline pipeline(function, mode);
  FlowGraph* flow_graph = pipeline.RunPasses({});

  auto entry = flow_graph->graph_entry()->normal_entry();
  EXPECT(entry != nullptr);

  DartReturnInstr* ret = nullptr;

  ILMatcher cursor(flow_graph, entry, true, ParallelMovesHandling::kSkip);
  RELEASE_ASSERT(cursor.TryMatch({
      kMoveGlob,
      {kMatchDartReturn, &ret},
  }));

  ConstantInstr* constant = ret->value()->definition()->AsConstant();
  EXPECT(constant != nullptr);
  if (constant != nullptr) {
    const Object& value = constant->value();
    EXPECT(value.IsSmi());
    if (value.IsSmi()) {
      const intptr_t int_value = Smi::Cast(value).Value();
      EXPECT_EQ(expected_value, int_value);
    }
  }
}

ISOLATE_UNIT_TEST_CASE(ConstantFold_bitLength) {
  // clang-format off
  auto kScript = R"(
      b0() => 0. bitLength;  // 0...00000
      b1() => 1. bitLength;  // 0...00001
      b100() => 100. bitLength;
      b200() => 200. bitLength;
      bffff() => 0xffff. bitLength;
      m1() => (-1).bitLength;  // 1...11111
      m2() => (-2).bitLength;  // 1...11110

      main() {
        b0();
        b1();
        b100();
        b200();
        bffff();
        m1();
        m2();
      }
  )";
  // clang-format on

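  // Note: for negative values, Dart's int.bitLength is the bit length of the
  // bitwise complement, so (-1).bitLength == 0 and (-2).bitLength == 1, which
  // is what the m1/m2 expectations below encode.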
  const auto& root_library = Library::Handle(LoadTestScript(kScript));
  Invoke(root_library, "main");

  auto test = [&](const char* function, intptr_t expected) {
    TestConstantFoldToSmi(root_library, function, CompilerPass::kJIT, expected);
    TestConstantFoldToSmi(root_library, function, CompilerPass::kAOT, expected);
  };

  test("b0", 0);
  test("b1", 1);
  test("b100", 7);
  test("b200", 8);
  test("bffff", 16);
  test("m1", 0);
  test("m2", 1);
}

static void TestRepresentationChangeDuringCanonicalization(
    Thread* thread,
    bool allow_representation_change) {
  using compiler::BlockBuilder;

  const auto& lib = Library::Handle(Library::CoreLibrary());
  const Class& list_class =
      Class::Handle(lib.LookupClassAllowPrivate(Symbols::_List()));
  EXPECT(!list_class.IsNull());
  const Error& err = Error::Handle(list_class.EnsureIsFinalized(thread));
  EXPECT(err.IsNull());
  const Function& list_filled = Function::ZoneHandle(
      list_class.LookupFactoryAllowPrivate(Symbols::_ListFilledFactory()));
  EXPECT(!list_filled.IsNull());

  CompilerState S(thread, /*is_aot=*/true, /*is_optimizing=*/true);

  FlowGraphBuilderHelper H(/*num_parameters=*/1);
  H.AddVariable("param", AbstractType::ZoneHandle(Type::IntType()));

  auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();

  Definition* param = nullptr;
  LoadFieldInstr* load = nullptr;
  UnboxInstr* unbox = nullptr;
  Definition* add = nullptr;
  {
    BlockBuilder builder(H.flow_graph(), normal_entry);
    param = builder.AddParameter(0, kUnboxedInt64);

    InputsArray args(3);
    args.Add(new Value(H.flow_graph()->constant_null()));
    args.Add(new Value(param));
    args.Add(new Value(H.IntConstant(0)));
    StaticCallInstr* array = builder.AddDefinition(new StaticCallInstr(
        InstructionSource(), list_filled, 1, Array::empty_array(),
        std::move(args), DeoptId::kNone, 0, ICData::kNoRebind));
    array->UpdateType(CompileType::FromCid(kArrayCid));
    array->SetResultType(thread->zone(), CompileType::FromCid(kArrayCid));
    array->set_is_known_list_constructor(true);

    load = builder.AddDefinition(new LoadFieldInstr(
        new Value(array), Slot::Array_length(), InstructionSource()));

    unbox = builder.AddDefinition(new UnboxInt64Instr(
        new Value(load), DeoptId::kNone, Instruction::kNotSpeculative));

    add = builder.AddDefinition(new BinaryInt64OpInstr(
        Token::kADD, new Value(unbox), new Value(H.IntConstant(1)),
        S.GetNextDeoptId(), Instruction::kNotSpeculative));

    Definition* box = builder.AddDefinition(new BoxInt64Instr(new Value(add)));

    builder.AddReturn(new Value(box));
  }

  H.FinishGraph();

  if (!allow_representation_change) {
    H.flow_graph()->disallow_unmatched_representations();
  }

  H.flow_graph()->Canonicalize();

  if (allow_representation_change) {
    EXPECT(add->InputAt(0)->definition() == param);
  } else {
    EXPECT(add->InputAt(0)->definition() == unbox);
    EXPECT(unbox->value()->definition() == load);
  }
}

ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_RepresentationChange) {
  TestRepresentationChangeDuringCanonicalization(thread, true);
  TestRepresentationChangeDuringCanonicalization(thread, false);
}

enum TypeDataField {
  TypedDataBase_length,
  TypedDataView_offset_in_bytes,
  TypedDataView_typed_data,
};

static void TestCanonicalizationOfTypedDataViewFieldLoads(
    Thread* thread,
    TypeDataField field_kind) {
  const auto& typed_data_lib = Library::Handle(Library::TypedDataLibrary());
  const auto& view_cls = Class::Handle(
      typed_data_lib.LookupClassAllowPrivate(Symbols::_Float32ArrayView()));
  const Error& err = Error::Handle(view_cls.EnsureIsFinalized(thread));
  EXPECT(err.IsNull());
  const auto& factory =
      Function::ZoneHandle(view_cls.LookupFactoryAllowPrivate(String::Handle(
          String::Concat(Symbols::_Float32ArrayView(), Symbols::DotUnder()))));
  EXPECT(!factory.IsNull());

  using compiler::BlockBuilder;
  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
  FlowGraphBuilderHelper H;

  const Slot* field = nullptr;
  switch (field_kind) {
    case TypedDataBase_length:
      field = &Slot::TypedDataBase_length();
      break;
    case TypedDataView_offset_in_bytes:
      field = &Slot::TypedDataView_offset_in_bytes();
      break;
    case TypedDataView_typed_data:
      field = &Slot::TypedDataView_typed_data();
      break;
  }

  auto b1 = H.flow_graph()->graph_entry()->normal_entry();

  const auto constant_4 = H.IntConstant(4);
  const auto constant_1 = H.IntConstant(1);

  Definition* array;
  Definition* load;
  DartReturnInstr* ret;

  {
    BlockBuilder builder(H.flow_graph(), b1);
    //   array <- AllocateTypedData(1)
    array = builder.AddDefinition(new AllocateTypedDataInstr(
        InstructionSource(), kTypedDataFloat64ArrayCid, new Value(constant_1),
        DeoptId::kNone));
    //   view <- StaticCall(_Float32ArrayView._, null, array, 4, 1)
    const auto view = builder.AddDefinition(new StaticCallInstr(
        InstructionSource(), factory, 1, Array::empty_array(),
        {new Value(H.flow_graph()->constant_null()), new Value(array),
         new Value(constant_4), new Value(constant_1)},
        DeoptId::kNone, 1, ICData::RebindRule::kStatic));
    //   load <- LoadField(view.<field>)
    load = builder.AddDefinition(
        new LoadFieldInstr(new Value(view), *field, InstructionSource()));
    //   Return(load)
    ret = builder.AddReturn(new Value(load));
  }
  H.FinishGraph();
  H.flow_graph()->Canonicalize();

  switch (field_kind) {
    case TypedDataBase_length:
      EXPECT_PROPERTY(ret->value()->definition(), &it == constant_1);
      break;
    case TypedDataView_offset_in_bytes:
      EXPECT_PROPERTY(ret->value()->definition(), &it == constant_4);
      break;
    case TypedDataView_typed_data:
      EXPECT_PROPERTY(ret->value()->definition(), &it == array);
      break;
  }
}

ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_TypedDataViewFieldLoads) {
  TestCanonicalizationOfTypedDataViewFieldLoads(thread, TypedDataBase_length);
  TestCanonicalizationOfTypedDataViewFieldLoads(thread,
                                                TypedDataView_offset_in_bytes);
  TestCanonicalizationOfTypedDataViewFieldLoads(thread,
                                                TypedDataView_typed_data);
}

// Check that canonicalize can devirtualize InstanceCall based on type
// information in AOT mode.
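// Here the receiver is the result of AllocateTypedData, so its class id is
// statically known; the `get:length` InstanceCall built below (with empty
// ICData) can therefore be devirtualized into a StaticCall to the
// _TypedListBase length getter.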
ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_InstanceCallWithNoICDataInAOT) {
  const auto& typed_data_lib = Library::Handle(Library::TypedDataLibrary());
  const auto& view_cls = Class::Handle(typed_data_lib.LookupClassAllowPrivate(
      String::Handle(Symbols::New(thread, "_TypedListBase"))));
  const Error& err = Error::Handle(view_cls.EnsureIsFinalized(thread));
  EXPECT(err.IsNull());
  const auto& getter = Function::Handle(
      view_cls.LookupFunctionAllowPrivate(Symbols::GetLength()));
  EXPECT(!getter.IsNull());

  using compiler::BlockBuilder;
  CompilerState S(thread, /*is_aot=*/true, /*is_optimizing=*/true);
  FlowGraphBuilderHelper H;

  auto b1 = H.flow_graph()->graph_entry()->normal_entry();

  InstanceCallInstr* length_call;
  DartReturnInstr* ret;

  {
    BlockBuilder builder(H.flow_graph(), b1);
    //   array <- AllocateTypedData(1)
    const auto array = builder.AddDefinition(new AllocateTypedDataInstr(
        InstructionSource(), kTypedDataFloat64ArrayCid,
        new Value(H.IntConstant(1)), DeoptId::kNone));
    //   length_call <- InstanceCall('get:length', array, ICData[])
    length_call = builder.AddDefinition(new InstanceCallInstr(
        InstructionSource(), Symbols::GetLength(), Token::kGET,
        /*args=*/{new Value(array)}, 0, Array::empty_array(), 1,
        /*deopt_id=*/42));
    length_call->EnsureICData(H.flow_graph());
    //   Return(length_call)
    ret = builder.AddReturn(new Value(length_call));
  }
  H.FinishGraph();
  H.flow_graph()->Canonicalize();

  EXPECT_PROPERTY(length_call, it.previous() == nullptr);
  EXPECT_PROPERTY(ret->value()->definition(), it.IsStaticCall());
  EXPECT_PROPERTY(ret->value()->definition()->AsStaticCall(),
                  it.function().ptr() == getter.ptr());
}

static void TestTestRangeCanonicalize(const AbstractType& type,
                                      uword lower,
                                      uword upper,
                                      bool result) {
  using compiler::BlockBuilder;
  CompilerState S(Thread::Current(), /*is_aot=*/true, /*is_optimizing=*/true);
  FlowGraphBuilderHelper H(/*num_parameters=*/1);
  H.AddVariable("v0", type);

  auto normal_entry = H.flow_graph()->graph_entry()->normal_entry();

  DartReturnInstr* ret;
  {
    BlockBuilder builder(H.flow_graph(), normal_entry);
    Definition* param = builder.AddParameter(0, kTagged);
    Definition* load_cid =
        builder.AddDefinition(new LoadClassIdInstr(new Value(param)));
    Definition* test_range = builder.AddDefinition(new TestRangeInstr(
        InstructionSource(), new Value(load_cid), lower, upper, kTagged));
    ret = builder.AddReturn(new Value(test_range));
  }
  H.FinishGraph();
  H.flow_graph()->Canonicalize();

  EXPECT_PROPERTY(ret, it.value()->BindsToConstant());
  EXPECT_PROPERTY(ret,
                  it.value()->BoundConstant().ptr() == Bool::Get(result).ptr());
}

ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_TestRange) {
  HierarchyInfo hierarchy_info(thread);
  TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::IntType()),
                            kOneByteStringCid, kTwoByteStringCid, false);
  TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::IntType()), kSmiCid,
                            kMintCid, true);
  TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::NullType()), kSmiCid,
                            kMintCid, false);
  TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::Double()), kSmiCid,
                            kMintCid, false);
  TestTestRangeCanonicalize(AbstractType::ZoneHandle(Type::ObjectType()), 1,
                            kClassIdTagMax, true);
}

void TestStaticFieldForwarding(Thread* thread,
                               const Class& test_cls,
                               const Field& field,
                               intptr_t num_stores,
                               bool expected_to_forward) {
  EXPECT(num_stores <= 2);

  using compiler::BlockBuilder;
  CompilerState S(thread, /*is_aot=*/false, /*is_optimizing=*/true);
  FlowGraphBuilderHelper H;

  auto b1 = H.flow_graph()->graph_entry()->normal_entry();

  const auto constant_42 = H.IntConstant(42);
  const auto constant_24 = H.IntConstant(24);
  Definition* load;
  DartReturnInstr* ret;

  {
    BlockBuilder builder(H.flow_graph(), b1);
    //   obj <- AllocateObject(TestClass)
    const auto obj = builder.AddDefinition(
        new AllocateObjectInstr(InstructionSource(), test_cls, DeoptId::kNone));

    if (num_stores >= 1) {
      //   StoreField(obj.field = 42)
      builder.AddInstruction(new StoreFieldInstr(
          field, new Value(obj), new Value(constant_42),
          StoreBarrierType::kNoStoreBarrier, InstructionSource(),
          &H.flow_graph()->parsed_function(),
          StoreFieldInstr::Kind::kInitializing));
    }

    if (num_stores >= 2) {
      //   StoreField(obj.field = 24)
      builder.AddInstruction(new StoreFieldInstr(
          field, new Value(obj), new Value(constant_24),
          StoreBarrierType::kNoStoreBarrier, InstructionSource(),
          &H.flow_graph()->parsed_function()));
    }

    //   load <- LoadField(obj.field)
    load = builder.AddDefinition(new LoadFieldInstr(
        new Value(obj), Slot::Get(field, &H.flow_graph()->parsed_function()),
        InstructionSource()));

    //   Return(load)
    ret = builder.AddReturn(new Value(load));
  }
  H.FinishGraph();
  H.flow_graph()->Canonicalize();

  if (expected_to_forward) {
    EXPECT_PROPERTY(ret->value()->definition(), &it == constant_42);
  } else {
    EXPECT_PROPERTY(ret->value()->definition(), &it == load);
  }
}

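// The expectations below encode when the load may be forwarded to the stored
// value: only a final field with exactly one initializing store forwards;
// with zero or two stores, with a late final field, or with a non-final
// field, the LoadField must be kept.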
ISOLATE_UNIT_TEST_CASE(IL_Canonicalize_FinalFieldForwarding) {
  const char* script_chars = R"(
    import 'dart:typed_data';

    class TestClass {
      final dynamic finalField;
      late final dynamic lateFinalField;
      dynamic normalField;

      TestClass(this.finalField, this.lateFinalField, this.normalField);
    }
  )";
  const auto& lib = Library::Handle(LoadTestScript(script_chars));

  const auto& test_cls = Class::ZoneHandle(
      lib.LookupClass(String::Handle(Symbols::New(thread, "TestClass"))));
  const auto& err = Error::Handle(test_cls.EnsureIsFinalized(thread));
  EXPECT(err.IsNull());

  const auto lookup_field = [&](const char* name) -> const Field& {
    const auto& original_field = Field::Handle(
        test_cls.LookupField(String::Handle(Symbols::New(thread, name))));
    EXPECT(!original_field.IsNull());
    return Field::Handle(original_field.CloneFromOriginal());
  };

  const auto& final_field = lookup_field("finalField");
  const auto& late_final_field = lookup_field("lateFinalField");
  const auto& normal_field = lookup_field("normalField");

  TestStaticFieldForwarding(thread, test_cls, final_field, /*num_stores=*/0,
                            /*expected_to_forward=*/false);
  TestStaticFieldForwarding(thread, test_cls, final_field, /*num_stores=*/1,
                            /*expected_to_forward=*/true);
  TestStaticFieldForwarding(thread, test_cls, final_field, /*num_stores=*/2,
                            /*expected_to_forward=*/false);

  TestStaticFieldForwarding(thread, test_cls, late_final_field,
                            /*num_stores=*/0, /*expected_to_forward=*/false);
  TestStaticFieldForwarding(thread, test_cls, late_final_field,
                            /*num_stores=*/1, /*expected_to_forward=*/false);
  TestStaticFieldForwarding(thread, test_cls, late_final_field,
                            /*num_stores=*/2, /*expected_to_forward=*/false);

  TestStaticFieldForwarding(thread, test_cls, normal_field, /*num_stores=*/0,
                            /*expected_to_forward=*/false);
  TestStaticFieldForwarding(thread, test_cls, normal_field, /*num_stores=*/1,
                            /*expected_to_forward=*/false);
  TestStaticFieldForwarding(thread, test_cls, normal_field, /*num_stores=*/2,
                            /*expected_to_forward=*/false);
}

}  // namespace dart