@@ -126,22 +126,29 @@ struct AnnihilateStats {
     n_bytes_freed: uint
 }
 
-unsafe fn each_live_alloc(f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) {
+unsafe fn each_live_alloc(read_next_before: bool,
+                          f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) {
+    //! Walks the internal list of allocations
+
     use managed;
 
     let task: *Task = transmute(rustrt::rust_get_task());
     let box = (*task).boxed_region.live_allocs;
     let mut box: *mut BoxRepr = transmute(copy box);
     while box != mut_null() {
-        let next = transmute(copy (*box).header.next);
+        let next_before = transmute(copy (*box).header.next);
         let uniq =
             (*box).header.ref_count == managed::raw::RC_MANAGED_UNIQUE;
 
         if !f(box, uniq) {
             break
         }
 
-        box = next
+        if read_next_before {
+            box = next_before;
+        } else {
+            box = transmute(copy (*box).header.next);
+        }
     }
 }
 
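Aside: the `read_next_before` flag encodes which side of a use-after-free hazard each caller faces. Below is a minimal sketch of the same traversal contract in modern Rust, with a hypothetical `Node` list and `each_node` helper standing in for `BoxRepr` and the task's `live_allocs`; it is an illustration of the pattern, not the patched code.

```rust
// Hypothetical illustration of the read-before vs. read-after contract.
struct Node {
    next: *mut Node,
}

unsafe fn each_node(read_next_before: bool,
                    mut node: *mut Node,
                    f: &mut dyn FnMut(*mut Node) -> bool) {
    while !node.is_null() {
        // Snapshot the link before the callback runs.
        let next_before = (*node).next;
        if !f(node) {
            break;
        }
        node = if read_next_before {
            // Use the snapshot: safe when the callback may have freed
            // `node` itself (as in pass 3 of annihilate below).
            next_before
        } else {
            // Re-read: safe when the callback may have freed the node
            // the snapshot pointed at and relinked the list (pass 2).
            (*node).next
        };
    }
}
```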
@@ -159,7 +166,7 @@ fn debug_mem() -> bool {
 #[cfg(notest)]
 #[lang="annihilate"]
 pub unsafe fn annihilate() {
-    use unstable::lang::local_free;
+    use unstable::lang::{local_free, debug_ptr};
     use io::WriterUtil;
     use io;
     use libc;
@@ -173,27 +180,46 @@ pub unsafe fn annihilate() {
     };
 
     // Pass 1: Make all boxes immortal.
-    for each_live_alloc |box, uniq| {
+    //
+    // In this pass, nothing gets freed, so it does not matter whether
+    // we read the next field before or after the callback.
+    for each_live_alloc(true) |box, uniq| {
         stats.n_total_boxes += 1;
         if uniq {
+            debug_ptr("Managed-uniq: ", &*box);
             stats.n_unique_boxes += 1;
         } else {
+            debug_ptr("Immortalizing: ", &*box);
             (*box).header.ref_count = managed::raw::RC_IMMORTAL;
         }
     }
 
     // Pass 2: Drop all boxes.
-    for each_live_alloc |box, uniq| {
+    //
+    // In this pass, unique-managed boxes may get freed, but not
+    // managed boxes, so we must read the `next` field *after* the
+    // callback, as the original value may have been freed.
+    for each_live_alloc(false) |box, uniq| {
         if !uniq {
+            debug_ptr("Invoking tydesc/glue on: ", &*box);
             let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc);
             let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0));
+            debug_ptr("Box data: ", &(*box).data);
+            debug_ptr("Type descriptor: ", tydesc);
             drop_glue(to_unsafe_ptr(&tydesc), transmute(&(*box).data));
+            debug_ptr("Dropped ", &*box);
         }
     }
 
     // Pass 3: Free all boxes.
-    for each_live_alloc |box, uniq| {
+    //
+    // In this pass, managed boxes may get freed (but not
+    // unique-managed boxes, though I think that none of those are
+    // left), so we must read the `next` field before, since it will
+    // not be valid after.
+    for each_live_alloc(true) |box, uniq| {
         if !uniq {
+            debug_ptr("About to free: ", &*box);
             stats.n_bytes_freed +=
                 (*((*box).header.type_desc)).size
                 + sys::size_of::<BoxRepr>();
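For orientation, the three passes divide the work so that user destructors never observe a half-freed heap: immortalize, then drop, then free. A loose, hypothetical model of that split in modern Rust follows; the names (`FakeBox`, the `Vec`-based heap) are stand-ins, since the real code walks raw `BoxRepr` pointers and invokes drop glue through a type descriptor.

```rust
// Simplified, hypothetical model of the three-pass annihilation scheme.
const RC_IMMORTAL: usize = usize::MAX;

struct FakeBox {
    ref_count: usize,
    payload: Vec<u8>, // stands in for the box body that drop glue runs on
}

fn annihilate(heap: &mut Vec<FakeBox>) {
    // Pass 1: immortalize every box, so destructors running in pass 2
    // cannot cause a box to be freed while the walk still needs it.
    for b in heap.iter_mut() {
        b.ref_count = RC_IMMORTAL;
    }
    // Pass 2: run destructors without releasing the box memory itself.
    for b in heap.iter_mut() {
        b.payload = Vec::new(); // stand-in for invoking the drop glue
    }
    // Pass 3: release the allocations themselves.
    heap.clear();
}
```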