; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s

; Global pointer slot used by the tests below; loads from it may or may not
; carry !nonnull metadata depending on the test.
@gp = global i32* null, align 8

declare i8* @malloc(i64) #1
; A noalias (malloc'd) pointer can never equal a pointer loaded from a global,
; so the eq compare folds to false.
define i1 @compare_global_trivialeq() {
; CHECK-LABEL: @compare_global_trivialeq(
; CHECK-NEXT:    ret i1 false
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8
  %cmp = icmp eq i32* %bc, %lgp
  ret i1 %cmp
}

; Same as above with the inverted predicate: ne folds to true.
define i1 @compare_global_trivialne() {
; CHECK-LABEL: @compare_global_trivialne(
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8
  %cmp = icmp ne i32* %bc, %lgp
  ret i1 %cmp
}

; The comparison should fold to false irrespective of whether the call to malloc can be elided or not
declare void @f()
define i1 @compare_and_call_with_deopt() {
; CHECK-LABEL: @compare_and_call_with_deopt(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 false
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp eq i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; Same function as above with deopt operand in function f, but comparison is NE
define i1 @compare_ne_and_call_with_deopt() {
; CHECK-LABEL: @compare_ne_and_call_with_deopt(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp ne i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; Same function as above, but global not marked nonnull, and we cannot fold the comparison
define i1 @compare_ne_global_maybe_null() {
; CHECK-LABEL: @compare_ne_global_maybe_null(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[M]] to i32*
; CHECK-NEXT:    [[LGP:%.*]] = load i32*, i32** @gp, align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LGP]], [[BC]]
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp
  %cmp = icmp ne i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; FIXME: The comparison should fold to false since %m escapes (call to function escape)
; after the comparison.
declare void @escape(i8*)
define i1 @compare_and_call_after() {
; CHECK-LABEL: @compare_and_call_after(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[M]] to i32*
; CHECK-NEXT:    [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[LGP]], [[BC]]
; CHECK-NEXT:    br i1 [[CMP]], label [[ESCAPE_CALL:%.*]], label [[JUST_RETURN:%.*]]
; CHECK:       escape_call:
; CHECK-NEXT:    call void @escape(i8* [[M]])
; CHECK-NEXT:    ret i1 true
; CHECK:       just_return:
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp eq i32* %bc, %lgp
  br i1 %cmp, label %escape_call, label %just_return

escape_call:
  call void @escape(i8* %m)
  ret i1 true

just_return:
  ret i1 %cmp
}
; Two distinct mallocs can never compare equal.
define i1 @compare_distinct_mallocs() {
; CHECK-LABEL: @compare_distinct_mallocs(
; CHECK-NEXT:    ret i1 false
;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  %cmp = icmp eq i8* %m, %n
  ret i1 %cmp
}

; the compare is folded to true since the folding compare looks through bitcasts.
; call to malloc and the bitcast instructions are elided after that since there are no uses of the malloc
define i1 @compare_samepointer_under_bitcast() {
; CHECK-LABEL: @compare_samepointer_under_bitcast(
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %bcback = bitcast i32* %bc to i8*
  %cmp = icmp eq i8* %m, %bcback
  ret i1 %cmp
}

; the compare is folded to true since the folding compare looks through bitcasts.
; The malloc call for %m cannot be elided since it is used in the call to function f.
define i1 @compare_samepointer_escaped() {
; CHECK-LABEL: @compare_samepointer_escaped(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %bcback = bitcast i32* %bc to i8*
  %cmp = icmp eq i8* %m, %bcback
  call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; Technically, we can fold the %cmp2 comparison, even though %m escapes through
; the ret statement since `ret` terminates the function and we cannot reach from
; the ret to cmp.
; FIXME: Folding this %cmp2 when %m escapes through ret could be an issue with
; cross-threading data dependencies since we do not make the distinction between
; atomic and non-atomic loads in capture tracking.
define i8* @compare_ret_escape(i8* %c) {
; CHECK-LABEL: @compare_ret_escape(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    [[N:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[N]], [[C:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[RETST:%.*]], label [[CHK:%.*]]
; CHECK:       retst:
; CHECK-NEXT:    ret i8* [[M]]
; CHECK:       chk:
; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[M]] to i32*
; CHECK-NEXT:    [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32* [[LGP]], [[BC]]
; CHECK-NEXT:    br i1 [[CMP2]], label [[RETST]], label [[CHK2:%.*]]
; CHECK:       chk2:
; CHECK-NEXT:    ret i8* [[N]]
;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  %cmp = icmp eq i8* %n, %c
  br i1 %cmp, label %retst, label %chk

retst:
  ret i8* %m

chk:
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp2 = icmp eq i32* %lgp, %bc
  br i1 %cmp2, label %retst, label %chk2

chk2:
  ret i8* %n
}

; The malloc call for %m cannot be elided since it is used in the call to function f.
; However, the cmp can be folded to true as %n doesnt escape and %m, %n are distinct allocations
define i1 @compare_distinct_pointer_escape() {
; CHECK-LABEL: @compare_distinct_pointer_escape(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  tail call void @f() [ "deopt"(i8* %m) ]
  %cmp = icmp ne i8* %m, %n
  ret i1 %cmp
}

; Empty metadata node used as the !nonnull annotation on loads above.
!0 = !{}