 #include "ir.h"
 #include "ir_private.h"

+static int ir_remove_unreachable_blocks(ir_ctx *ctx);
+
 IR_ALWAYS_INLINE void _ir_add_successors(const ir_ctx *ctx, ir_ref ref, ir_worklist *worklist)
 {
 	ir_use_list *use_list = &ctx->use_lists[ref];
@@ -57,6 +59,24 @@ IR_ALWAYS_INLINE void _ir_add_predecessors(const ir_insn *insn, ir_worklist *wor
 		}
 	}
 }

+void ir_reset_cfg(ir_ctx *ctx)
+{
+	ctx->cfg_blocks_count = 0;
+	ctx->cfg_edges_count = 0;
+	if (ctx->cfg_blocks) {
+		ir_mem_free(ctx->cfg_blocks);
+		ctx->cfg_blocks = NULL;
+		if (ctx->cfg_edges) {
+			ir_mem_free(ctx->cfg_edges);
+			ctx->cfg_edges = NULL;
+		}
+		if (ctx->cfg_map) {
+			ir_mem_free(ctx->cfg_map);
+			ctx->cfg_map = NULL;
+		}
+	}
+}
+
 int ir_build_cfg(ir_ctx *ctx)
 {
 	ir_ref n, *p, ref, start, end;
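The new ir_reset_cfg() frees any previously built CFG data (basic blocks, edge list, and block map) and resets the counters, so the CFG can be rebuilt after a transformation invalidates it. A minimal usage sketch under that assumption; the rebuild_cfg() wrapper below is hypothetical, not part of the IR API:

/* Hypothetical wrapper: discard a stale CFG and build a fresh one.
 * ir_reset_cfg() and ir_build_cfg() are the functions shown in this diff. */
static int rebuild_cfg(ir_ctx *ctx)
{
	ir_reset_cfg(ctx);         /* free cfg_blocks/cfg_edges/cfg_map, zero the counters */
	return ir_build_cfg(ctx);  /* recompute basic blocks and their edges from the IR */
}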
@@ -330,11 +350,15 @@ static void ir_remove_merge_input(ir_ctx *ctx, ir_ref merge, ir_ref from)
 		}
 	}
 	i--;
+	for (j = i + 1; j <= n; j++) {
+		ir_insn_set_op(insn, j, IR_UNUSED);
+	}
 	if (i == 1) {
 		insn->op = IR_BEGIN;
 		insn->inputs_count = 1;
 		use_list = &ctx->use_lists[merge];
 		if (use_list->count > 1) {
+			n++;
 			for (k = 0, p = &ctx->use_edges[use_list->refs]; k < use_list->count; k++, p++) {
 				use = *p;
 				use_insn = &ctx->ir_base[use];
@@ -347,22 +371,24 @@ static void ir_remove_merge_input(ir_ctx *ctx, ir_ref merge, ir_ref from)
 						if (ir_bitset_in(life_inputs, j - 1)) {
 							use_insn->op1 = ir_insn_op(use_insn, j);
 						} else if (input > 0) {
-							ir_use_list_remove_all(ctx, input, use);
+							ir_use_list_remove_one(ctx, input, use);
 						}
 					}
 					use_insn->op = IR_COPY;
-					use_insn->op2 = IR_UNUSED;
-					use_insn->op3 = IR_UNUSED;
+					use_insn->inputs_count = 1;
+					for (j = 2; j <= n; j++) {
+						ir_insn_set_op(use_insn, j, IR_UNUSED);
+					}
 					ir_use_list_remove_all(ctx, merge, use);
 				}
 			}
 		}
 	} else {
 		insn->inputs_count = i;

-		n++;
 		use_list = &ctx->use_lists[merge];
 		if (use_list->count > 1) {
+			n++;
 			for (k = 0, p = &ctx->use_edges[use_list->refs]; k < use_list->count; k++, p++) {
 				use = *p;
 				use_insn = &ctx->ir_base[use];
@@ -378,9 +404,13 @@ static void ir_remove_merge_input(ir_ctx *ctx, ir_ref merge, ir_ref from)
 							}
 							i++;
 						} else if (input > 0) {
-							ir_use_list_remove_all(ctx, input, use);
+							ir_use_list_remove_one(ctx, input, use);
 						}
 					}
+					use_insn->inputs_count = i - 1;
+					for (j = i; j <= n; j++) {
+						ir_insn_set_op(use_insn, j, IR_UNUSED);
+					}
 				}
 			}
 		}
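Both branches of ir_remove_merge_input() now shrink inputs_count and explicitly clear every operand slot past the new count with ir_insn_set_op(..., IR_UNUSED), instead of only resetting op2/op3, so no stale ir_ref survives in wider MERGE/PHI nodes. The pattern in isolation looks roughly like the sketch below (shrink_inputs() is a hypothetical illustration, not IR API):

/* Sketch of the cleanup pattern used above: after reducing an
 * instruction to new_count inputs, clear the remaining slots.
 * (shrink_inputs() is illustrative only.) */
static void shrink_inputs(ir_insn *insn, ir_ref new_count, ir_ref old_count)
{
	ir_ref j;

	insn->inputs_count = new_count;
	for (j = new_count + 1; j <= old_count; j++) {
		ir_insn_set_op(insn, j, IR_UNUSED);
	}
}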
@@ -390,7 +420,7 @@ static void ir_remove_merge_input(ir_ctx *ctx, ir_ref merge, ir_ref from)
 }

 /* CFG constructed after SCCP pass doesn't have unreachable BBs, otherwise they should be removed */
-int ir_remove_unreachable_blocks(ir_ctx *ctx)
+static int ir_remove_unreachable_blocks(ir_ctx *ctx)
 {
 	uint32_t b, *p, i;
 	uint32_t unreachable_count = 0;