summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--thread/iap.s2886
-rw-r--r--thread/icompact.s1303
-rw-r--r--thread/icompact_rmark.s955
-rw-r--r--thread/icompact_rmarkr.s976
-rw-r--r--thread/icopy.s1215
-rw-r--r--thread/ifileIO3.s738
-rw-r--r--thread/imark.s1927
-rw-r--r--thread/istartup.s5165
-rw-r--r--thread/ithread.s294
9 files changed, 15459 insertions, 0 deletions
diff --git a/thread/iap.s b/thread/iap.s
new file mode 100644
index 0000000..06ea0c2
--- /dev/null
+++ b/thread/iap.s
@@ -0,0 +1,2886 @@
+
+ .text
+
+ .globl ap_2
+ .globl ap_3
+ .globl ap_4
+ .globl ap_5
+ .globl ap_6
+ .globl ap_7
+ .globl ap_8
+ .globl ap_9
+ .globl ap_10
+ .globl ap_11
+ .globl ap_12
+ .globl ap_13
+ .globl ap_14
+ .globl ap_15
+ .globl ap_16
+ .globl ap_17
+ .globl ap_18
+ .globl ap_19
+ .globl ap_20
+ .globl ap_21
+ .globl ap_22
+ .globl ap_23
+ .globl ap_24
+ .globl ap_25
+ .globl ap_26
+ .globl ap_27
+ .globl ap_28
+ .globl ap_29
+ .globl ap_30
+ .globl ap_31
+ .globl ap_32
+
+ .globl add_empty_node_2
+ .globl add_empty_node_3
+ .globl add_empty_node_4
+ .globl add_empty_node_5
+ .globl add_empty_node_6
+ .globl add_empty_node_7
+ .globl add_empty_node_8
+ .globl add_empty_node_9
+ .globl add_empty_node_10
+ .globl add_empty_node_11
+ .globl add_empty_node_12
+ .globl add_empty_node_13
+ .globl add_empty_node_14
+ .globl add_empty_node_15
+ .globl add_empty_node_16
+ .globl add_empty_node_17
+ .globl add_empty_node_18
+ .globl add_empty_node_19
+ .globl add_empty_node_20
+ .globl add_empty_node_21
+ .globl add_empty_node_22
+ .globl add_empty_node_23
+ .globl add_empty_node_24
+ .globl add_empty_node_25
+ .globl add_empty_node_26
+ .globl add_empty_node_27
+ .globl add_empty_node_28
+ .globl add_empty_node_29
+ .globl add_empty_node_30
+ .globl add_empty_node_31
+ .globl add_empty_node_32
+
+ .globl yet_args_needed_5
+ .globl yet_args_needed_6
+ .globl yet_args_needed_7
+ .globl yet_args_needed_8
+ .globl yet_args_needed_9
+ .globl yet_args_needed_10
+ .globl yet_args_needed_11
+ .globl yet_args_needed_12
+ .globl yet_args_needed_13
+ .globl yet_args_needed_14
+ .globl yet_args_needed_15
+ .globl yet_args_needed_16
+ .globl yet_args_needed_17
+ .globl yet_args_needed_18
+ .globl yet_args_needed_19
+ .globl yet_args_needed_20
+ .globl yet_args_needed_21
+ .globl yet_args_needed_22
+ .globl yet_args_needed_23
+ .globl yet_args_needed_24
+ .globl yet_args_needed_25
+ .globl yet_args_needed_26
+ .globl yet_args_needed_27
+ .globl yet_args_needed_28
+ .globl yet_args_needed_29
+ .globl yet_args_needed_30
+ .globl yet_args_needed_31
+
+/ ap_N: apply the top N arguments to the (closure) node in a1.
+/ Register roles as used in this file (inferred from this code --
+/ confirm against the code generator's register assignment):
+/   a1 = node being applied, a0 = next argument,
+/   a3 = argument (A-)stack pointer, growing upward: a push is
+/        "movl r,(a3) ; addl $4,a3",
+/   a4 = thread-local heap administration base (free_heap_offset /
+/        end_heap_offset live at fixed offsets from it),
+/   a2, d0, d1 = scratch.
+/ Each ap_N loads the node's descriptor and compares its first word
+/ against N*8; apparently the descriptor word encodes "arguments still
+/ needed" scaled by 8.  On a match, fast_ap below enters the function
+/ directly with all N arguments.  Otherwise the one-argument apply
+/ code at descriptor+2 is called, the result becomes the new a1, the
+/ next argument is popped into a0, and control FALLS THROUGH into
+/ ap_{N-1}.  The chain is therefore order-dependent: ap_32 .. ap_2
+/ must stay contiguous and in descending order.
+ap_32:
+	movl (a1),a2
+	movl $32*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_31:
+	movl (a1),a2
+	movl $31*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_30:
+	movl (a1),a2
+	movl $30*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_29:
+	movl (a1),a2
+	movl $29*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_28:
+	movl (a1),a2
+	movl $28*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_27:
+	movl (a1),a2
+	movl $27*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_26:
+	movl (a1),a2
+	movl $26*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_25:
+	movl (a1),a2
+	movl $25*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_24:
+	movl (a1),a2
+	movl $24*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_23:
+	movl (a1),a2
+	movl $23*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_22:
+	movl (a1),a2
+	movl $22*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_21:
+	movl (a1),a2
+	movl $21*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_20:
+	movl (a1),a2
+	movl $20*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_19:
+	movl (a1),a2
+	movl $19*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_18:
+	movl (a1),a2
+	movl $18*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_17:
+	movl (a1),a2
+	movl $17*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_16:
+	movl (a1),a2
+	movl $16*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_15:
+	movl (a1),a2
+	movl $15*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_14:
+	movl (a1),a2
+	movl $14*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_13:
+	movl (a1),a2
+	movl $13*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_12:
+	movl (a1),a2
+	movl $12*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_11:
+	movl (a1),a2
+	movl $11*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_10:
+	movl (a1),a2
+	movl $10*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_9:
+	movl (a1),a2
+	movl $9*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_8:
+	movl (a1),a2
+	movl $8*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_7:
+	movl (a1),a2
+	movl $7*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_6:
+	movl (a1),a2
+	movl $6*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_5:
+	movl (a1),a2
+	movl $5*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_4:
+	movl (a1),a2
+	movl $4*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_3:
+	movl (a1),a2
+	movl $3*8,d1
+	cmpw d1w,(a2)
+	je fast_ap
+
+	call *2(a2)
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+
+ap_2:
+	movl (a1),a2
+	movl $2*8,d1
+	cmpw d1w,(a2)
+	jne no_fast_ap2
+
+/ fast_ap: entered with a2 = descriptor, d1 = N*8.
+/ -6(a2+N*8) apparently holds the code entry for the saturated call;
+/ movzwl -2(a2) loads the number of arguments already stored in the
+/ node, which selects one of the repl_args_K unpacking routines.
+/ The "subl $12,a2" ($20 when profiling, because of 8 padding nops)
+/ backs the entry address up to an alternate entry stub that is laid
+/ out exactly 12 (20) bytes before each yet_args_needed_N label later
+/ in this file -- do NOT change the size of those stubs or of this
+/ adjustment.
+fast_ap:
+	addl a2,d1
+	movzwl -2(a2),d0
+	movl -6(d1),a2
+#ifdef PROFILE
+	subl $20,a2
+#else
+	subl $12,a2
+#endif
+	cmpl $1,d0
+	jb repl_args_0
+	je repl_args_1
+
+/ two or more stored arguments: push the pending argument a0, then
+/ a0 = 8(a1) (for K>=3 this is a pointer to the remaining-args block;
+/ for K==2 it is the second argument itself).
+	movl a0,(a3)
+	addl $4,a3
+	movl 8(a1),a0
+
+	cmpl $3,d0
+	jb repl_args_2
+
+/ movl preserves flags: the following je still tests "d0 == 3"
+	movl 4(a1),a1
+	je repl_args_3
+
+	cmpl $5,d0
+	jb repl_args_4
+	je repl_args_5
+
+	cmpl $7,d0
+	jb repl_args_6
+
+	push d1
+
+/ K >= 7: copy arguments 7..K from the args block (a0) onto the
+/ A-stack, highest index first, until six remain for repl_args_6.
+repl_args_7_:
+	movl -8(a0,d0,4),d1
+	movl d1,(a3)
+	subl $1,d0
+	addl $4,a3
+	cmpl $6,d0
+	jne repl_args_7_
+
+	pop d1
+
+/ repl_args_K: unpack K stored arguments onto the A-stack / registers
+/ and jump to the function's code (a2).  The args block is laid out
+/ with the first remaining argument at (a0).
+repl_args_6:
+	movl 16(a0),d0
+	movl d0,(a3)
+	movl 12(a0),d0
+	movl d0,4(a3)
+	movl 8(a0),d0
+	movl d0,8(a3)
+	movl 4(a0),d0
+	movl (a0),a0
+	movl d0,12(a3)
+	addl $16,a3
+	jmp *a2
+
+/ no stored arguments: the pending argument becomes the node register
+/ and the next argument is popped from the A-stack.
+repl_args_0:
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+	jmp *a2
+
+repl_args_1:
+	movl 4(a1),a1
+	jmp *a2
+
+repl_args_2:
+	movl 4(a1),a1
+	jmp *a2
+
+repl_args_3:
+	movl 4(a0),d0
+	movl (a0),a0
+	movl d0,(a3)
+	addl $4,a3
+	jmp *a2
+
+repl_args_4:
+	movl 8(a0),d0
+	movl d0,(a3)
+	movl 4(a0),d0
+	movl (a0),a0
+	movl d0,4(a3)
+	addl $8,a3
+	jmp *a2
+
+repl_args_5:
+	movl 12(a0),d0
+	movl d0,(a3)
+	movl 8(a0),d0
+	movl d0,4(a3)
+	movl 4(a0),d0
+	movl (a0),a0
+	movl d0,8(a3)
+	addl $12,a3
+	jmp *a2
+
+/ slow path for ap_2: apply one argument, then tail-jump into the
+/ resulting node's apply code with the next argument popped.
+no_fast_ap2:
+	call *2(a2)
+	movl (a0),a2
+	movl a0,a1
+	movl -4(a3),a0
+	subl $4,a3
+	jmp *2(a2)
+
+
+/ add_empty_node_N: allocate a fresh 3-word node initialised with the
+/ __cycle__in__spine descriptor and insert its address N-1 entries
+/ below the new A-stack top (N==2 pushes it on top), shifting the top
+/ N-2 entries up one slot.  The net A-stack growth is one word.
+/ If free_heap_offset has reached end_heap_offset, collect_2 runs the
+/ garbage collector first; collect_2 evidently refreshes a2 with the
+/ new free-heap pointer (the _gc_ re-entry paths rely on it or reload
+/ free_heap_offset) -- confirm against collect_2's definition.
+add_empty_node_2:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_2_gc
+add_empty_node_2_gc_:
+	movl a2,(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_2_gc:
+	call collect_2
+	jmp add_empty_node_2_gc_
+
+/ _3 .. _10 follow the same pattern, with an unrolled shift of the top
+/ N-2 stack entries before the new node is stored.
+add_empty_node_3:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_3_gc
+add_empty_node_3_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-4(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_3_gc:
+	call collect_2
+	jmp add_empty_node_3_gc_
+
+add_empty_node_4:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_4_gc
+add_empty_node_4_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-8(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_4_gc:
+	call collect_2
+	jmp add_empty_node_4_gc_
+
+add_empty_node_5:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_5_gc
+add_empty_node_5_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl -12(a3),a2
+	movl a2,-8(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-12(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_5_gc:
+	call collect_2
+	jmp add_empty_node_5_gc_
+
+add_empty_node_6:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_6_gc
+add_empty_node_6_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl -12(a3),a2
+	movl a2,-8(a3)
+	movl -16(a3),a2
+	movl a2,-12(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-16(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_6_gc:
+	call collect_2
+	jmp add_empty_node_6_gc_
+
+add_empty_node_7:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_7_gc
+add_empty_node_7_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl -12(a3),a2
+	movl a2,-8(a3)
+	movl -16(a3),a2
+	movl a2,-12(a3)
+	movl -20(a3),a2
+	movl a2,-16(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-20(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_7_gc:
+	call collect_2
+	jmp add_empty_node_7_gc_
+
+add_empty_node_8:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_8_gc
+add_empty_node_8_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl -12(a3),a2
+	movl a2,-8(a3)
+	movl -16(a3),a2
+	movl a2,-12(a3)
+	movl -20(a3),a2
+	movl a2,-16(a3)
+	movl -24(a3),a2
+	movl a2,-20(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-24(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_8_gc:
+	call collect_2
+	jmp add_empty_node_8_gc_
+
+add_empty_node_9:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_9_gc
+add_empty_node_9_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl -12(a3),a2
+	movl a2,-8(a3)
+	movl -16(a3),a2
+	movl a2,-12(a3)
+	movl -20(a3),a2
+	movl a2,-16(a3)
+	movl -24(a3),a2
+	movl a2,-20(a3)
+	movl -28(a3),a2
+	movl a2,-24(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-28(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_9_gc:
+	call collect_2
+	jmp add_empty_node_9_gc_
+
+add_empty_node_10:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_10_gc
+add_empty_node_10_gc_:
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl -12(a3),a2
+	movl a2,-8(a3)
+	movl -16(a3),a2
+	movl a2,-12(a3)
+	movl -20(a3),a2
+	movl a2,-16(a3)
+	movl -24(a3),a2
+	movl a2,-20(a3)
+	movl -28(a3),a2
+	movl a2,-24(a3)
+	movl -32(a3),a2
+	movl a2,-28(a3)
+	movl free_heap_offset(a4),a2
+	movl a2,-32(a3)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_10_gc:
+	call collect_2
+	jmp add_empty_node_10_gc_
+
+/ add_empty_node_{11,15,19,23,27,31}: looped variant of the routines
+/ above for depths of the form 4*d1+3 (N-1 deep, shifting N-2 = 4*d1+1
+/ entries).  One entry is shifted in the prologue, then the loop moves
+/ four entries per iteration, d1 iterations.  add_empty_node_11 falls
+/ through into add_empty_node_11_ with d1 = 2.
+add_empty_node_31:
+	movl $7,d1
+	jmp add_empty_node_11_
+add_empty_node_27:
+	movl $6,d1
+	jmp add_empty_node_11_
+add_empty_node_23:
+	movl $5,d1
+	jmp add_empty_node_11_
+add_empty_node_19:
+	movl $4,d1
+	jmp add_empty_node_11_
+add_empty_node_15:
+	movl $3,d1
+	jmp add_empty_node_11_
+add_empty_node_11:
+	movl $2,d1
+add_empty_node_11_:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_11_gc
+add_empty_node_11_gc_:
+	movl a3,d0
+	movl -4(a3),a2
+	movl a2,(a3)
+add_empty_node_11_lp:
+	movl -8(d0),a2
+	movl a2,-4(d0)
+	movl -12(d0),a2
+	movl a2,-8(d0)
+	movl -16(d0),a2
+	movl a2,-12(d0)
+	movl -20(d0),a2
+	movl a2,-16(d0)
+	subl $16,d0
+	subl $1,d1
+	jne add_empty_node_11_lp
+	movl free_heap_offset(a4),a2
+	movl a2,-4(d0)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_11_gc:
+	call collect_2
+	jmp add_empty_node_11_gc_
+
+/ add_empty_node_{12,16,20,24,28,32}: as the _11 family, but for
+/ depths 4*d1+4 -- two entries are shifted in the prologue, then the
+/ loop moves four per iteration, d1 iterations.
+add_empty_node_32:
+	movl $7,d1
+	jmp add_empty_node_12_
+add_empty_node_28:
+	movl $6,d1
+	jmp add_empty_node_12_
+add_empty_node_24:
+	movl $5,d1
+	jmp add_empty_node_12_
+add_empty_node_20:
+	movl $4,d1
+	jmp add_empty_node_12_
+add_empty_node_16:
+	movl $3,d1
+	jmp add_empty_node_12_
+add_empty_node_12:
+	movl $2,d1
+add_empty_node_12_:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_12_gc
+add_empty_node_12_gc_:
+	movl a3,d0
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+add_empty_node_12_lp:
+	movl -12(d0),a2
+	movl a2,-8(d0)
+	movl -16(d0),a2
+	movl a2,-12(d0)
+	movl -20(d0),a2
+	movl a2,-16(d0)
+	movl -24(d0),a2
+	movl a2,-20(d0)
+	subl $16,d0
+	subl $1,d1
+	jne add_empty_node_12_lp
+	movl free_heap_offset(a4),a2
+	movl a2,-8(d0)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_12_gc:
+	call collect_2
+	jmp add_empty_node_12_gc_
+
+/ add_empty_node_{13,17,21,25,29}: as the _11 family, but for depths
+/ 4*d1+5 -- three entries shifted in the prologue, then four per loop
+/ iteration, d1 iterations.
+add_empty_node_29:
+	movl $6,d1
+	jmp add_empty_node_13_
+add_empty_node_25:
+	movl $5,d1
+	jmp add_empty_node_13_
+add_empty_node_21:
+	movl $4,d1
+	jmp add_empty_node_13_
+add_empty_node_17:
+	movl $3,d1
+	jmp add_empty_node_13_
+add_empty_node_13:
+	movl $2,d1
+add_empty_node_13_:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_13_gc
+add_empty_node_13_gc_:
+	movl a3,d0
+	movl -4(a3),a2
+	movl a2,(a3)
+	movl -8(a3),a2
+	movl a2,-4(a3)
+	movl -12(a3),a2
+	movl a2,-8(a3)
+add_empty_node_13_lp:
+	movl -16(d0),a2
+	movl a2,-12(d0)
+	movl -20(d0),a2
+	movl a2,-16(d0)
+	movl -24(d0),a2
+	movl a2,-20(d0)
+	movl -28(d0),a2
+	movl a2,-24(d0)
+	subl $16,d0
+	subl $1,d1
+	jne add_empty_node_13_lp
+	movl free_heap_offset(a4),a2
+	movl a2,-12(d0)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_13_gc:
+	call collect_2
+	jmp add_empty_node_13_gc_
+
+/ add_empty_node_{14,18,22,26,30}: as the _11 family, but for depths
+/ 4*d1+2 -- no prologue shift; the loop alone moves 4*d1 entries,
+/ four per iteration.
+add_empty_node_30:
+	movl $7,d1
+	jmp add_empty_node_14_
+add_empty_node_26:
+	movl $6,d1
+	jmp add_empty_node_14_
+add_empty_node_22:
+	movl $5,d1
+	jmp add_empty_node_14_
+add_empty_node_18:
+	movl $4,d1
+	jmp add_empty_node_14_
+add_empty_node_14:
+	movl $3,d1
+/ falls through; the original "jmp add_empty_node_14_" targeted the
+/ immediately following label and has been removed, matching the
+/ fall-through style of the _11/_12/_13 variants above
+add_empty_node_14_:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae add_empty_node_14_gc
+add_empty_node_14_gc_:
+	movl a3,d0
+add_empty_node_14_lp:
+	movl -4(d0),a2
+	movl a2,(d0)
+	movl -8(d0),a2
+	movl a2,-4(d0)
+	movl -12(d0),a2
+	movl a2,-8(d0)
+	movl -16(d0),a2
+	movl a2,-12(d0)
+	subl $16,d0
+	subl $1,d1
+	jne add_empty_node_14_lp
+	movl free_heap_offset(a4),a2
+	movl a2,(d0)
+	movl $__cycle__in__spine,(a2)
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	addl $4,a3
+	ret
+add_empty_node_14_gc:
+	call collect_2
+	jmp add_empty_node_14_gc_
+
+/ yet_args_needed_0: curried application of one argument (a0) to a
+/ node (a1) that stores no arguments yet.  Allocates a 2-word node
+/ [descriptor+8, a0] -- advancing the descriptor by 8 apparently moves
+/ to the next arity entry of its descriptor table -- and returns the
+/ new node in a0.  Runs collect_2 first if the heap is full.
+yet_args_needed_0:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae yet_args_needed_0_gc
+yet_args_needed_0_gc_r:
+	mov a0,4(a2)
+	mov (a1),d0
+	mov a2,a0
+	add $8,d0
+	mov d0,(a2)
+	add $8,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+yet_args_needed_0_gc:
+	call collect_2
+	jmp yet_args_needed_0_gc_r
+
+
+/ LAYOUT-SENSITIVE: the align(2) stub below (heap check + jmp, padded
+/ with a nop to exactly 12 bytes; 20 with the 8 PROFILE nops) sits
+/ immediately before yet_args_needed_1 so that fast_ap's
+/ "subl $12,a2" lands on it.  build_node_2 is entered with the flags
+/ of the stub's cmpl still live -- its first instruction must remain
+/ the jae.  Do not change instruction sizes or alignment here.
+	align (2)
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jmp build_node_2
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+/ yet_args_needed_1: add argument a0 to a node holding one argument:
+/ builds [descriptor+8, old arg, a0] and returns it in a0.
+yet_args_needed_1:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae yet_args_needed_1_gc
+yet_args_needed_1_gc_r:
+	mov a0,8(a2)
+	mov (a1),d0
+	mov a2,a0
+	add $8,d0
+	mov d0,(a2)
+	mov 4(a1),d1
+	mov d1,4(a2)
+	add $12,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+yet_args_needed_1_gc:
+	call collect_2
+	jmp yet_args_needed_1_gc_r
+
+/ build_node_2: build a 2-argument node [d1 (descriptor), a1, a0];
+/ result in a0.  Entered only via the stub above (flags live).
+build_node_2:
+	jae build_node_2_gc
+build_node_2_gc_r:
+	movl d1,(a2)
+	movl a1,4(a2)
+	movl a0,8(a2)
+	movl a2,a0
+	addl $12,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+build_node_2_gc:
+	call collect_2
+	jmp build_node_2_gc_r
+
+
+/ Same layout-sensitive stub scheme as above: 12-byte (20 with
+/ PROFILE) build_node_3 entry stub directly before yet_args_needed_2;
+/ build_node_3's leading jae uses the stub's flags.
+	align (2)
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jmp build_node_3
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+/ yet_args_needed_2: a1 holds 2 arguments (arg1 at 4(a1), arg2 at
+/ 8(a1)); build an args block [arg2, a0] at a2 and a node
+/ [descriptor+8, arg1, ->args block] at a2+8; result node in a0.
+yet_args_needed_2:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae gc_22
+gc_r_22:
+	mov (a1),d0
+	mov a0,4(a2)
+	add $8,d0
+	mov 4(a1),d1
+	mov d0,8(a2)
+	lea 8(a2),a0
+	mov d1,12(a2)
+	mov 8(a1),d1
+	mov d1,(a2)
+	mov a2,16(a2)
+	add $20,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+gc_22:	call collect_2
+	jmp gc_r_22
+
+/ build_node_3: node [d1, a1, ->args] with args block [a0, stack top].
+build_node_3:
+	jae build_node_3_gc
+build_node_3_gc_r:
+	movl d1,(a2)
+	lea 12(a2),d1
+	movl a1,4(a2)
+	movl d1,8(a2)
+	movl a0,12(a2)
+	movl a2,a0
+	movl -4(a3),d1
+	subl $4,a3
+	movl d1,16(a2)
+	addl $20,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+build_node_3_gc:
+	call collect_2
+	jmp build_node_3_gc_r
+
+
+/ Same layout-sensitive stub scheme (12/20-byte build_node_4 entry
+/ before yet_args_needed_3; jae consumes the stub's flags).
+	align (2)
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jmp build_node_4
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+/ yet_args_needed_3: a1 holds 3 arguments (arg1, ->[arg2,arg3]);
+/ rebuild with a0 appended: args block [arg2,arg3,a0] at a2, node
+/ [descriptor+8, arg1, ->args block] at a2+12.
+yet_args_needed_3:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae gc_23
+gc_r_23:
+	mov (a1),d0
+	mov a0,8(a2)
+	add $8,d0
+	mov 4(a1),d1
+	mov d0,12(a2)
+	mov 8(a1),a1
+	mov d1,16(a2)
+	mov (a1),d1
+	mov a2,20(a2)
+	mov d1,(a2)
+	mov 4(a1),d1
+	lea 12(a2),a0
+	mov d1,4(a2)
+	add $24,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+gc_23:	call collect_2
+	jmp gc_r_23
+
+/ build_node_4: node [d1, a1, ->args]; args = [a0, top, top-1].
+build_node_4:
+	jae build_node_4_gc
+build_node_4_gc_r:
+	movl d1,(a2)
+	lea 12(a2),d1
+	movl a1,4(a2)
+	movl d1,8(a2)
+	movl a0,12(a2)
+	movl a2,a0
+	movl -4(a3),d1
+	movl d1,16(a2)
+	movl -8(a3),d1
+	subl $8,a3
+	movl d1,20(a2)
+	addl $24,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+build_node_4_gc:
+	call collect_2
+	jmp build_node_4_gc_r
+
+
+/ Same layout-sensitive stub scheme (12/20-byte build_node_5 entry
+/ before yet_args_needed_4; jae consumes the stub's flags).
+	align (2)
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jmp build_node_5
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+/ yet_args_needed_4: a1 holds 4 arguments (arg1, ->[arg2..arg4]);
+/ rebuild with a0 appended, as in yet_args_needed_3.
+yet_args_needed_4:
+	movl free_heap_offset(a4),a2
+	cmpl end_heap_offset(a4),a2
+	jae gc_24
+gc_r_24:
+	mov (a1),d0
+	mov a0,12(a2)
+	add $8,d0
+	mov 4(a1),d1
+	mov d0,16(a2)
+	mov 8(a1),a1
+	mov d1,20(a2)
+	mov (a1),d1
+	mov a2,24(a2)
+	mov d1,(a2)
+	mov 4(a1),d1
+	lea 16(a2),a0
+	mov d1,4(a2)
+	mov 8(a1),d1
+	mov d1,8(a2)
+	add $28,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+gc_24:	call collect_2
+	jmp gc_r_24
+
+/ build_node_5: node [d1, a1, ->args]; args = [a0, top .. top-2].
+build_node_5:
+	jae build_node_5_gc
+build_node_5_gc_r:
+	movl d1,(a2)
+	lea 12(a2),d1
+	movl a1,4(a2)
+	movl d1,8(a2)
+	movl a0,12(a2)
+	movl a2,a0
+	movl -4(a3),d1
+	movl d1,16(a2)
+	movl -8(a3),d1
+	movl d1,20(a2)
+	movl -12(a3),d1
+	subl $12,a3
+	movl d1,24(a2)
+	addl $28,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+build_node_5_gc:
+	call collect_2
+	jmp build_node_5_gc_r
+
+
+/ One pair of entry points per arity from here on, each laid out as:
+/     align ; movl $K,d0 ; jmp build_node_ ; nop ; nop   <- 12 bytes
+/     align ; [8 nops when PROFILE]                      <- +8 bytes
+/   yet_args_needed_{K-1}: mov (a1),d1 ; movl $K+2,d0 ; jmp yet_args_needed_
+/ The 12-byte (20 with PROFILE) stub before each label is the
+/ build_node_ entry found by fast_ap's "subl $12,a2"; d0 carries the
+/ arity (build) or arity+3 (yet_args_needed_, = words to copy + new
+/ node overhead).  LAYOUT-SENSITIVE: do not change instruction sizes,
+/ padding nops, or alignment anywhere in this region.
+	align (2)
+	movl $6,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_5:
+	mov (a1),d1
+	movl $8,d0
+	jmp yet_args_needed_
+
+
+	align (2)
+	movl $7,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_6:
+	mov (a1),d1
+	movl $9,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $8,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_7:
+	mov (a1),d1
+	movl $10,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $9,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_8:
+	mov (a1),d1
+	movl $11,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $10,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_9:
+	mov (a1),d1
+	movl $12,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $11,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_10:
+	mov (a1),d1
+	movl $13,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $12,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_11:
+	mov (a1),d1
+	movl $14,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $13,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_12:
+	mov (a1),d1
+	movl $15,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $14,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_13:
+	mov (a1),d1
+	movl $16,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $15,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_14:
+	mov (a1),d1
+	movl $17,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $16,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_15:
+	mov (a1),d1
+	movl $18,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $17,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_16:
+	mov (a1),d1
+	movl $19,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $18,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_17:
+	mov (a1),d1
+	movl $20,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $19,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_18:
+	mov (a1),d1
+	movl $21,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $20,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_19:
+	mov (a1),d1
+	movl $22,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $21,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_20:
+	mov (a1),d1
+	movl $23,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $22,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_21:
+	mov (a1),d1
+	movl $24,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $23,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_22:
+	mov (a1),d1
+	movl $25,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $24,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_23:
+	mov (a1),d1
+	movl $26,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $25,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_24:
+	mov (a1),d1
+	movl $27,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $26,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_25:
+	mov (a1),d1
+	movl $28,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $27,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_26:
+	mov (a1),d1
+	movl $29,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $28,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_27:
+	mov (a1),d1
+	movl $30,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $29,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_28:
+	mov (a1),d1
+	movl $31,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $30,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_29:
+	mov (a1),d1
+	movl $32,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $31,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_30:
+	mov (a1),d1
+	movl $33,d0
+	jmp yet_args_needed_
+
+	align (2)
+	movl $32,d0
+	jmp build_node_
+	nop
+	nop
+	align (2)
+#ifdef PROFILE
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+#endif
+yet_args_needed_31:
+	mov (a1),d1
+	movl $34,d0
+	jmp yet_args_needed_
+
+/ yet_args_needed (generic): append argument a0 to a node a1 that
+/ already stores K arguments (K read from the word before the
+/ descriptor pointed to by (a1)); builds a fresh args block followed
+/ by a fresh node [descriptor+8, arg1, ->args block]; result in a0.
+/ On entry at yet_args_needed_: d1 = descriptor, d0 = K+3 (words of
+/ argument data to copy plus node overhead).  The lea computes the
+/ allocation limit (d0*4 - 32 relative to the 32-byte heap margin --
+/ presumably end_heap_offset is kept 32 below the true end; confirm
+/ against the collector).  Uses the C stack to save d1/a0 across the
+/ copy loop; on heap overflow calls collect_2l (the variant that
+/ apparently also preserves d-registers -- see its definition).
+yet_args_needed:
+/ for more than 4 arguments
+	mov (a1),d1
+	movzwl -2(d1),d0
+	add $3,d0
+yet_args_needed_:
+	movl free_heap_offset(a4),a2
+	lea -32(a2,d0,4),a2
+	cmpl end_heap_offset(a4),a2
+	jae yet_args_needed_gc
+yet_args_needed_gc_r:
+	sub $3+1+4,d0
+	push d1
+	push a0
+	mov 4(a1),d1
+	mov 8(a1),a1
+	movl free_heap_offset(a4),a2
+/ copy the first three words of the old args block unconditionally
+	mov (a1),a0
+	mov a0,(a2)
+	mov 4(a1),a0
+	mov a0,4(a2)
+	mov 8(a1),a0
+	mov a0,8(a2)
+	add $12,a1
+	add $12,a2
+
+/ copy the remaining d0+1 words (d0 counts down through zero)
+yet_args_needed_cp_a:
+	mov (a1),a0
+	add $4,a1
+	mov a0,(a2)
+	add $4,a2
+	subl $1,d0
+	jge yet_args_needed_cp_a
+
+/ append the new argument, then build the node header after the block
+	pop a0
+	mov a0,(a2)
+	pop d0
+	add $8,d0
+	mov d0,4(a2)
+	lea 4(a2),a0
+	mov d1,8(a2)
+	movl free_heap_offset(a4),d1
+	mov d1,12(a2)
+	add $16,a2
+	movl a2,free_heap_offset(a4)
+	ret
+
+yet_args_needed_gc:
+	call collect_2l
+	jmp yet_args_needed_gc_r
+
+/ build_node_ (generic, arity d0 >= 6): build a node [d1 (descriptor),
+/ a1, ->args] whose args block holds a0 followed by the top d0-2
+/ A-stack entries (popped).  Entered from the "movl $K,d0 ;
+/ jmp build_node_" stubs above; result node in a0.  The lea computes
+/ the allocation limit as in yet_args_needed_; collect_2l is the GC
+/ entry that apparently preserves the live d-registers.
+build_node_:
+	movl free_heap_offset(a4),a2
+	lea -32+8(a2,d0,4),a2
+	cmpl end_heap_offset(a4),a2
+	jae build_node_gc
+build_node_gc_r:
+	movl free_heap_offset(a4),a2
+	movl d1,(a2)
+	lea 12(a2),d1
+	movl a1,4(a2)
+	movl d1,8(a2)
+	movl a0,12(a2)
+	movl a2,a0
+	movl -4(a3),d1
+	movl d1,16(a2)
+	movl -8(a3),d1
+	movl d1,20(a2)
+	movl -12(a3),d1
+	subl $12,a3
+	movl d1,24(a2)
+	addl $28,a2
+
+/ remaining d0-5 arguments come off the A-stack (d0 >= 6 here)
+	subl $5,d0
+build_node_cp_a:
+	movl -4(a3),d1
+	subl $4,a3
+	movl d1,(a2)
+	addl $4,a2
+	subl $1,d0
+	jne build_node_cp_a
+
+	movl a2,free_heap_offset(a4)
+	ret
+
+build_node_gc:
+	call collect_2l
+	jmp build_node_gc_r
+
+ .globl apupd_1
+ .globl apupd_2
+ .globl apupd_3
+ .globl apupd_4
+ .globl apupd_5
+ .globl apupd_6
+ .globl apupd_7
+ .globl apupd_8
+ .globl apupd_9
+ .globl apupd_10
+ .globl apupd_11
+ .globl apupd_12
+ .globl apupd_13
+ .globl apupd_14
+ .globl apupd_15
+ .globl apupd_16
+ .globl apupd_17
+ .globl apupd_18
+ .globl apupd_19
+ .globl apupd_20
+ .globl apupd_21
+ .globl apupd_22
+ .globl apupd_23
+ .globl apupd_24
+ .globl apupd_25
+ .globl apupd_26
+ .globl apupd_27
+ .globl apupd_28
+ .globl apupd_29
+ .globl apupd_30
+ .globl apupd_31
+ .globl apupd_32
+ .globl __indirection
+
+/ apupd_N: apply N arguments and update the application node with the
+/ result (lazy-evaluation node update).  Each entry inspects the
+/ return address on the C stack: if it equals apupd_upd, the caller is
+/ the generic update path (ap_upd below), so the update can be done
+/ eagerly -- the A-stack entry holding the node to update (N deep) is
+/ removed, the node is overwritten with an __indirection to the value
+/ found one entry deeper, and control tail-jumps to plain ap_N.
+/ Otherwise it jumps to ap_upd with a2 = address of the ap_N routine;
+/ ap_upd calls it and copies the 3-word result over the node popped
+/ from the A-stack.  apupd_7 and up use the move_8..move_33 helpers
+/ (defined elsewhere) to shift the top A-stack entries down one slot,
+/ presumably mirroring the inline shifts in apupd_2..apupd_6 --
+/ confirm against their definitions.
+apupd_1:
+	cmpl $apupd_upd,(sp)
+	lea ap_1,a2
+	jne ap_upd
+
+	movl -4(a3),a2
+	movl -8(a3),d0
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+/ ap_1: apply one argument via the node's apply entry at descriptor+2
+ap_1:
+	movl (a1),a2
+	jmp *2(a2)
+
+apupd_2:
+	cmpl $apupd_upd,(sp)
+	lea ap_2,a2
+	jne ap_upd
+
+	movl -8(a3),a2
+	movl -12(a3),d0
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_2
+
+apupd_3:
+	cmpl $apupd_upd,(sp)
+	lea ap_3,a2
+	jne ap_upd
+
+	movl -12(a3),a2
+	movl -16(a3),d0
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_3
+
+apupd_4:
+	cmpl $apupd_upd,(sp)
+	lea ap_4,a2
+	jne ap_upd
+
+	movl -16(a3),a2
+	movl -20(a3),d0
+	movl -12(a3),d1
+	movl d1,-16(a3)
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_4
+
+apupd_5:
+	cmpl $apupd_upd,(sp)
+	lea ap_5,a2
+	jne ap_upd
+
+	movl -20(a3),a2
+	movl -24(a3),d0
+	movl -16(a3),d1
+	movl d1,-20(a3)
+	movl -12(a3),d1
+	movl d1,-16(a3)
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_5
+
+apupd_6:
+	cmpl $apupd_upd,(sp)
+	lea ap_6,a2
+	jne ap_upd
+
+	movl -24(a3),a2
+	movl -28(a3),d0
+	movl -20(a3),d1
+	movl d1,-24(a3)
+	movl -16(a3),d1
+	movl d1,-20(a3)
+	movl -12(a3),d1
+	movl d1,-16(a3)
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_6
+
+apupd_7:
+	cmpl $apupd_upd,(sp)
+	lea ap_7,a2
+	jne ap_upd
+
+	movl -28(a3),a2
+	movl -32(a3),d0
+	call move_8
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_7
+
+apupd_8:
+	cmpl $apupd_upd,(sp)
+	lea ap_8,a2
+	jne ap_upd
+
+	movl -32(a3),a2
+	movl -36(a3),d0
+	call move_9
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_8
+
+apupd_9:
+	cmpl $apupd_upd,(sp)
+	lea ap_9,a2
+	jne ap_upd
+
+	movl -36(a3),a2
+	movl -40(a3),d0
+	call move_10
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_9
+
+apupd_10:
+	cmpl $apupd_upd,(sp)
+	lea ap_10,a2
+	jne ap_upd
+
+	movl -40(a3),a2
+	movl -44(a3),d0
+	call move_11
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_10
+
+apupd_11:
+	cmpl $apupd_upd,(sp)
+	lea ap_11,a2
+	jne ap_upd
+
+	movl -44(a3),a2
+	movl -48(a3),d0
+	call move_12
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_11
+
+apupd_12:
+	cmpl $apupd_upd,(sp)
+	lea ap_12,a2
+	jne ap_upd
+
+	movl -48(a3),a2
+	movl -52(a3),d0
+	call move_13
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_12
+
+apupd_13:
+	cmpl $apupd_upd,(sp)
+	lea ap_13,a2
+	jne ap_upd
+
+	movl -52(a3),a2
+	movl -56(a3),d0
+	call move_14
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_13
+
+apupd_14:
+	cmpl $apupd_upd,(sp)
+	lea ap_14,a2
+	jne ap_upd
+
+	movl -56(a3),a2
+	movl -60(a3),d0
+	call move_15
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_14
+
+apupd_15:
+	cmpl $apupd_upd,(sp)
+	lea ap_15,a2
+	jne ap_upd
+
+	movl -60(a3),a2
+	movl -64(a3),d0
+	call move_16
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_15
+
+apupd_16:
+	cmpl $apupd_upd,(sp)
+	lea ap_16,a2
+	jne ap_upd
+
+	movl -64(a3),a2
+	movl -68(a3),d0
+	call move_17
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_16
+
+apupd_17:
+	cmpl $apupd_upd,(sp)
+	lea ap_17,a2
+	jne ap_upd
+
+	movl -68(a3),a2
+	movl -72(a3),d0
+	call move_18
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_17
+
+apupd_18:
+	cmpl $apupd_upd,(sp)
+	lea ap_18,a2
+	jne ap_upd
+
+	movl -72(a3),a2
+	movl -76(a3),d0
+	call move_19
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_18
+
+apupd_19:
+	cmpl $apupd_upd,(sp)
+	lea ap_19,a2
+	jne ap_upd
+
+	movl -76(a3),a2
+	movl -80(a3),d0
+	call move_20
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_19
+
+apupd_20:
+	cmpl $apupd_upd,(sp)
+	lea ap_20,a2
+	jne ap_upd
+
+	movl -80(a3),a2
+	movl -84(a3),d0
+	call move_21
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_20
+
+apupd_21:
+	cmpl $apupd_upd,(sp)
+	lea ap_21,a2
+	jne ap_upd
+
+	movl -84(a3),a2
+	movl -88(a3),d0
+	call move_22
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_21
+
+apupd_22:
+	cmpl $apupd_upd,(sp)
+	lea ap_22,a2
+	jne ap_upd
+
+	movl -88(a3),a2
+	movl -92(a3),d0
+	call move_23
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_22
+
+apupd_23:
+	cmpl $apupd_upd,(sp)
+	lea ap_23,a2
+	jne ap_upd
+
+	movl -92(a3),a2
+	movl -96(a3),d0
+	call move_24
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_23
+
+apupd_24:
+	cmpl $apupd_upd,(sp)
+	lea ap_24,a2
+	jne ap_upd
+
+	movl -96(a3),a2
+	movl -100(a3),d0
+	call move_25
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_24
+
+apupd_25:
+	cmpl $apupd_upd,(sp)
+	lea ap_25,a2
+	jne ap_upd
+
+	movl -100(a3),a2
+	movl -104(a3),d0
+	call move_26
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_25
+
+apupd_26:
+	cmpl $apupd_upd,(sp)
+	lea ap_26,a2
+	jne ap_upd
+
+	movl -104(a3),a2
+	movl -108(a3),d0
+	call move_27
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_26
+
+apupd_27:
+	cmpl $apupd_upd,(sp)
+	lea ap_27,a2
+	jne ap_upd
+
+	movl -108(a3),a2
+	movl -112(a3),d0
+	call move_28
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_27
+
+apupd_28:
+	cmpl $apupd_upd,(sp)
+	lea ap_28,a2
+	jne ap_upd
+
+	movl -112(a3),a2
+	movl -116(a3),d0
+	call move_29
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_28
+
+apupd_29:
+	cmpl $apupd_upd,(sp)
+	lea ap_29,a2
+	jne ap_upd
+
+	movl -116(a3),a2
+	movl -120(a3),d0
+	call move_30
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_29
+
+apupd_30:
+	cmpl $apupd_upd,(sp)
+	lea ap_30,a2
+	jne ap_upd
+
+	movl -120(a3),a2
+	movl -124(a3),d0
+	call move_31
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_30
+
+apupd_31:
+	cmpl $apupd_upd,(sp)
+	lea ap_31,a2
+	jne ap_upd
+
+	movl -124(a3),a2
+	movl -128(a3),d0
+	call move_32
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_31
+
+apupd_32:
+	cmpl $apupd_upd,(sp)
+	lea ap_32,a2
+	jne ap_upd
+
+	movl -128(a3),a2
+	movl -132(a3),d0
+	call move_33
+	subl $4,a3
+	movl $__indirection,(a2)
+	movl d0,4(a2)
+	jmp ap_32
+
+ nop
+
+ap_upd:
+ call *a2
+apupd_upd:
+ movl -4(a3),a1
+ subl $4,a3
+ movl (a0),d0
+ movl d0,(a1)
+ movl 4(a0),d0
+ movl d0,4(a1)
+ movl 8(a0),d0
+ movl a1,a0
+ movl d0,8(a1)
+ ret
+
+ .globl jmpupd_0
+ .globl jmpupd_1
+ .globl jmpupd_2
+ .globl jmpupd_3
+ .globl jmpupd_4
+ .globl jmpupd_5
+ .globl jmpupd_6
+ .globl jmpupd_7
+ .globl jmpupd_8
+ .globl jmpupd_9
+ .globl jmpupd_10
+ .globl jmpupd_11
+ .globl jmpupd_12
+ .globl jmpupd_13
+ .globl jmpupd_14
+ .globl jmpupd_15
+ .globl jmpupd_16
+ .globl jmpupd_17
+ .globl jmpupd_18
+ .globl jmpupd_19
+ .globl jmpupd_20
+ .globl jmpupd_21
+ .globl jmpupd_22
+ .globl jmpupd_23
+ .globl jmpupd_24
+ .globl jmpupd_25
+ .globl jmpupd_26
+ .globl jmpupd_27
+ .globl jmpupd_28
+ .globl jmpupd_29
+ .globl jmpupd_30
+ .globl jmpupd_31
+ .globl jmpupd_32
+
+/ jmpupd_N entries for N = 0..32: tail-call variant of apupd_N.
+/ If the return address is not apupd_upd, fall back to ap_upd (call +
+/ copy-update).  Otherwise: pop the node to update from the a3 stack,
+/ shift the N in-transit arguments one slot down (inline for small N,
+/ via move_N for N >= 7), overwrite the popped node with an
+/ __indirection to the next stack entry, and tail-jump to the code
+/ address in a2.  Only the a3 offsets differ between the entries.
+jmpupd_0:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -4(a3),d0
+	movl -8(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_1:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -4(a3),d0
+	movl -8(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_2:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -4(a3),d0
+	movl -8(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_3:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -8(a3),d0
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	movl -12(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_4:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -12(a3),d0
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	movl -16(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_5:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -16(a3),d0
+	movl -12(a3),d1
+	movl d1,-16(a3)
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	movl -20(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_6:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -20(a3),d0
+	movl -16(a3),d1
+	movl d1,-20(a3)
+	movl -12(a3),d1
+	movl d1,-16(a3)
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	movl -24(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+/ from here on the stack shift is done by the shared move_N helpers
+jmpupd_7:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -24(a3),d0
+	call move_7
+	movl -28(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_8:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -28(a3),d0
+	call move_8
+	movl -32(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_9:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -32(a3),d0
+	call move_9
+	movl -36(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_10:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -36(a3),d0
+	call move_10
+	movl -40(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_11:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -40(a3),d0
+	call move_11
+	movl -44(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_12:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -44(a3),d0
+	call move_12
+	movl -48(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_13:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -48(a3),d0
+	call move_13
+	movl -52(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_14:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -52(a3),d0
+	call move_14
+	movl -56(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_15:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -56(a3),d0
+	call move_15
+	movl -60(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_16:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -60(a3),d0
+	call move_16
+	movl -64(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_17:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -64(a3),d0
+	call move_17
+	movl -68(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_18:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -68(a3),d0
+	call move_18
+	movl -72(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_19:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -72(a3),d0
+	call move_19
+	movl -76(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_20:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -76(a3),d0
+	call move_20
+	movl -80(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_21:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -80(a3),d0
+	call move_21
+	movl -84(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_22:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -84(a3),d0
+	call move_22
+	movl -88(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_23:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -88(a3),d0
+	call move_23
+	movl -92(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_24:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -92(a3),d0
+	call move_24
+	movl -96(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_25:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -96(a3),d0
+	call move_25
+	movl -100(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_26:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -100(a3),d0
+	call move_26
+	movl -104(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_27:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -104(a3),d0
+	call move_27
+	movl -108(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_28:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -108(a3),d0
+	call move_28
+	movl -112(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_29:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -112(a3),d0
+	call move_29
+	movl -116(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_30:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -116(a3),d0
+	call move_30
+	movl -120(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_31:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -120(a3),d0
+	call move_31
+	movl -124(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+jmpupd_32:
+	cmpl $apupd_upd,(sp)
+	jne ap_upd
+
+	movl -124(a3),d0
+	call move_32
+	movl -128(a3),d1
+	subl $4,a3
+	movl $__indirection,(d0)
+	movl d1,4(d0)
+	jmp *a2
+
+/ move_N helpers: shift the top entries of the a3 stack one slot
+/ downwards (each word at -4k(a3) is copied to -4(k+1)(a3)), closing
+/ the hole left by a popped entry.  The labels form one fall-through
+/ chain: entering at move_N performs N-1 single-word copies ending at
+/ the shared ret.  Clobbers d1 only.
+move_33:
+	movl -124(a3),d1
+	movl d1,-128(a3)
+move_32:
+	movl -120(a3),d1
+	movl d1,-124(a3)
+move_31:
+	movl -116(a3),d1
+	movl d1,-120(a3)
+move_30:
+	movl -112(a3),d1
+	movl d1,-116(a3)
+move_29:
+	movl -108(a3),d1
+	movl d1,-112(a3)
+move_28:
+	movl -104(a3),d1
+	movl d1,-108(a3)
+move_27:
+	movl -100(a3),d1
+	movl d1,-104(a3)
+move_26:
+	movl -96(a3),d1
+	movl d1,-100(a3)
+move_25:
+	movl -92(a3),d1
+	movl d1,-96(a3)
+move_24:
+	movl -88(a3),d1
+	movl d1,-92(a3)
+move_23:
+	movl -84(a3),d1
+	movl d1,-88(a3)
+move_22:
+	movl -80(a3),d1
+	movl d1,-84(a3)
+move_21:
+	movl -76(a3),d1
+	movl d1,-80(a3)
+move_20:
+	movl -72(a3),d1
+	movl d1,-76(a3)
+move_19:
+	movl -68(a3),d1
+	movl d1,-72(a3)
+move_18:
+	movl -64(a3),d1
+	movl d1,-68(a3)
+move_17:
+	movl -60(a3),d1
+	movl d1,-64(a3)
+move_16:
+	movl -56(a3),d1
+	movl d1,-60(a3)
+move_15:
+	movl -52(a3),d1
+	movl d1,-56(a3)
+move_14:
+	movl -48(a3),d1
+	movl d1,-52(a3)
+move_13:
+	movl -44(a3),d1
+	movl d1,-48(a3)
+move_12:
+	movl -40(a3),d1
+	movl d1,-44(a3)
+move_11:
+	movl -36(a3),d1
+	movl d1,-40(a3)
+move_10:
+	movl -32(a3),d1
+	movl d1,-36(a3)
+move_9:
+	movl -28(a3),d1
+	movl d1,-32(a3)
+move_8:
+	movl -24(a3),d1
+	movl d1,-28(a3)
+move_7:
+	movl -20(a3),d1
+	movl d1,-24(a3)
+	movl -16(a3),d1
+	movl d1,-20(a3)
+	movl -12(a3),d1
+	movl d1,-16(a3)
+	movl -8(a3),d1
+	movl d1,-12(a3)
+	movl -4(a3),d1
+	movl d1,-8(a3)
+	ret
diff --git a/thread/icompact.s b/thread/icompact.s
new file mode 100644
index 0000000..5a6558b
--- /dev/null
+++ b/thread/icompact.s
@@ -0,0 +1,1303 @@
+
+/ mark used nodes and pointers in argument parts and link backward pointers
+
+/ GC mark-phase driver (icompact.s entry).  Derives two cached values
+/ from the heap administration in a4, reserves 8000 bytes of machine
+/ stack headroom as a recursion limit for the marker (end_stack_offset),
+/ then marks everything reachable from the CAF list and from the
+/ A-stack before jumping to the compaction phase.
+	movl heap_vector_offset(a4),d0
+	shrl $2,d0
+	movl d0,heap_vector_d4_offset(a4)
+
+	movl heap_size_33_offset(a4),d0
+	shl $5,d0
+	movl d0,heap_size_32_33_offset(a4)
+
+	lea -8000(sp),a3
+
+	movl caf_list,d0
+	movl a3,end_stack_offset(a4)
+
+	test d0,d0
+	je end_mark_cafs
+
+/ each CAF block appears to be: link word at -4(d0), length word at
+/ (d0), then that many node pointers -- marked like a stack segment.
+mark_cafs_lp:
+	pushl -4(d0)
+
+	lea 4(d0),a3
+	movl (d0),d0
+	lea (a3,d0,4),a0
+
+	movl a0,end_vector_offset(a4)
+
+	call rmark_stack_nodes
+
+	popl d0
+	test d0,d0
+	jne mark_cafs_lp
+
+/ then mark all pointers on the A-stack (stack_p .. stack_top)
+end_mark_cafs:
+	movl stack_p_offset(a4),a3
+
+	movl stack_top_offset(a4),a0
+	movl a0,end_vector_offset(a4)
+	call rmark_stack_nodes
+
+	jmp compact_heap
+
+#include "icompact_rmark.s"
+#include "icompact_rmarkr.s"
+
+/ compact the heap
+
+/ compact_heap: entry of the compaction phase (runs after marking).
+compact_heap:
+
+#ifdef FINALIZERS
+/ Pre-pass over finalizer_list: finalizers whose node is no longer
+/ marked are unlinked and appended to free_finalizer_list; live ones
+/ stay.  Both get the temporary kFinalizerGCTemp descriptor so the
+/ compactor treats them uniformly (restored at end_copy).
+	movl $finalizer_list,a0
+	movl $free_finalizer_list,a1
+
+	movl (a0),a2
+determine_free_finalizers_after_compact1:
+	cmpl $__Nil-4,a2
+	je end_finalizers_after_compact1
+
+/ compute the mark-bit address/mask for node a2 and test it
+	movl neg_heap_p3_offset(a4),d1
+	movl heap_vector_offset(a4),a3
+	addl a2,d1
+
+	movl $31*4,d0
+	andl d1,d0
+	shrl $7,d1
+
+	movl bit_set_table(d0),d0
+	testl (a3,d1,4),d0
+	je finalizer_not_used_after_compact1
+
+/ marked: the first word may already be a (tagged) backward-pointer
+/ chain; follow it to the real descriptor before overwriting it
+	movl (a2),d0
+	movl a2,a3
+	jmp finalizer_find_descriptor
+
+finalizer_find_descriptor_lp:
+	andl $-4,d0
+	movl d0,a3
+	movl (d0),d0
+finalizer_find_descriptor:
+	test $1,d0
+	jne finalizer_find_descriptor_lp
+
+	movl $e____system__kFinalizerGCTemp+2,(a3)
+
+/ link a backward pointer only when the node lies above the list cell
+	cmpl a0,a2
+	ja finalizer_no_reverse
+
+	movl (a2),d0
+	leal 1(a0),a3
+	movl a3,(a2)
+	movl d0,(a0)
+
+finalizer_no_reverse:
+	lea 4(a2),a0
+	movl 4(a2),a2
+	jmp determine_free_finalizers_after_compact1
+
+/ not marked: move the cell onto free_finalizer_list
+finalizer_not_used_after_compact1:
+	movl $e____system__kFinalizerGCTemp+2,(a2)
+
+	movl a2,(a1)
+	lea 4(a2),a1
+
+	movl 4(a2),a2
+	movl a2,(a0)
+
+	jmp determine_free_finalizers_after_compact1
+
+end_finalizers_after_compact1:
+	movl a2,(a1)
+
+/ undo an accidental reversal of the list head, if any
+	movl finalizer_list,a0
+	cmpl $__Nil-4,a0
+	je finalizer_list_empty
+	testl $3,a0
+	jne finalizer_list_already_reversed
+	movl (a0),d0
+	movl $finalizer_list+1,(a0)
+	movl d0,finalizer_list
+finalizer_list_already_reversed:
+finalizer_list_empty:
+
+/ mark the free-finalizer chain itself so compaction relocates it
+	movl $free_finalizer_list,a3
+	cmpl $__Nil-4,(a3)
+
+	je free_finalizer_list_empty
+
+	movl $free_finalizer_list+4,end_vector_offset(a4)
+
+	call rmark_stack_nodes
+free_finalizer_list_empty:
+#endif
+
+/ Offsets into the 40-byte scratch frame used by the compaction loop
+/ (allocated on the machine stack below).
+a4_compact_sp_offset = 0
+heap_p3_compact_sp_offset = 4
+heap_vector_compact_sp_offset = 8
+neg_heap_p3_compact_sp_offset = 12
+neg_heap_vector_plus_4_compact_sp_offset = 16
+vector_counter_compact_sp_offset = 20
+vector_p_compact_sp_offset = 24
+end_heap_p3_compact_sp_offset = 28
+compact_sp_offset_2 = 32
+compact_sp_offset_1 = 36
+
+/ Set up the scratch frame: cache heap bounds and the (negated)
+/ bit-vector base so bit positions can be translated to heap addresses.
+/ From here on a4 is repurposed as the compaction destination pointer
+/ (the original a4 is saved in the frame).
+	lea -40(sp),sp
+	movl a4,a4_compact_sp_offset(sp)
+
+	movl heap_p3_offset(a4),d0
+	movl d0,heap_p3_compact_sp_offset(sp)
+
+	movl neg_heap_p3_offset(a4),d0
+	movl d0,neg_heap_p3_compact_sp_offset(sp)
+
+	movl heap_vector_offset(a4),d0
+	movl d0,heap_vector_compact_sp_offset(sp)
+
+	movl heap_size_33_offset(a4),d0
+
+/ end of heap = heap_p3 + 32*heap_size_33
+	movl d0,d1
+	shl $5,d1
+
+	addl heap_p3_compact_sp_offset(sp),d1
+
+	movl d1,end_heap_p3_compact_sp_offset(sp)
+
+	addl $3,d0
+	shr $2,d0
+
+	movl heap_vector_compact_sp_offset(sp),a0
+
+	lea 4(a0),d1
+	negl d1
+	movl d1,neg_heap_vector_plus_4_compact_sp_offset(sp)
+
+	movl heap_p3_compact_sp_offset(sp),a4
+	xorl a3,a3
+	jmp skip_zeros
+
+/ d0,a0,a2: free
+/ Main compaction scan: walk the mark bit vector word by word
+/ (vector_counter/vector_p in the frame), keeping the current 32-bit
+/ word in a3 and the heap address of its first bit in a2.  Each set
+/ bit is a marked node; copy_nodes slides it down to the destination
+/ pointer a4, resolving backward-pointer chains as it goes.
+find_non_zero_long:
+	movl vector_counter_compact_sp_offset(sp),d0
+	movl vector_p_compact_sp_offset(sp),a0
+skip_zeros:
+	subl $1,d0
+	jc end_copy
+	movl (a0),a3
+	addl $4,a0
+	testl a3,a3
+	je skip_zeros
+/ a2: free
+end_skip_zeros:
+	movl neg_heap_vector_plus_4_compact_sp_offset(sp),a2
+	movl d0,vector_counter_compact_sp_offset(sp)
+
+	addl a0,a2
+	movl a0,vector_p_compact_sp_offset(sp)
+
+	shl $5,a2
+	addl heap_p3_compact_sp_offset(sp),a2
+
+#ifdef NO_BIT_INSTRUCTIONS
+/ software bit-scan: find the lowest set bit of a3 byte by byte
+/ (first_one_bit_table maps a byte to its lowest set-bit index)
+bsf_and_copy_nodes:
+	movl a3,d0
+	movl a3,a0
+	andl $0xff,d0
+	jne found_bit1
+	andl $0xff00,a0
+	jne found_bit2
+	movl a3,d0
+	movl a3,a0
+	andl $0xff0000,d0
+	jne found_bit3
+	shrl $24,a0
+	movzbl first_one_bit_table(,a0,1),d1
+	addl $24,d1
+	jmp copy_nodes
+
+found_bit3:
+	shrl $16,d0
+	movzbl first_one_bit_table(,d0,1),d1
+	addl $16,d1
+	jmp copy_nodes
+
+found_bit2:
+	shrl $8,a0
+	movzbl first_one_bit_table(,a0,1),d1
+	addl $8,d1
+	jmp copy_nodes
+
+found_bit1:
+	movzbl first_one_bit_table(,d0,1),d1
+#else
+	bsf a3,d1
+#endif
+
+/ copy_nodes: d1 = bit index of the marked node within the current
+/ vector word; a2 + 4*d1 is the node address.  The first word holds
+/ either a descriptor or a tagged backward-pointer chain.
+copy_nodes:
+	movl (a2,d1,4),d0
+#ifdef NO_BIT_INSTRUCTIONS
+	andl bit_clear_table(,d1,4),a3
+#else
+	btr d1,a3
+#endif
+	leal 4(a2,d1,4),a0
+	dec d0
+
+	test $2,d0
+	je begin_update_list_2
+
+	movl -10(d0),d1
+	subl $2,d0
+
+/ follow the (odd-tagged) chain to the real descriptor word
+	test $1,d1
+	je end_list_2
+find_descriptor_2:
+	andl $-4,d1
+	movl (d1),d1
+	test $1,d1
+	jne find_descriptor_2
+
+/ d1 = descriptor; arity word at -2 selects record vs plain hnf layout
+end_list_2:
+	movl d1,a1
+	movzwl -2(d1),d1
+	cmpl $256,d1
+	jb no_record_arguments
+
+	movzwl -2+2(a1),a1
+	subl $2,a1
+	jae copy_record_arguments_aa
+
+	subl $256+3,d1
+
+/ record with only unboxed (basic) arguments: d1 = word count - 1
+copy_record_arguments_all_b:
+	movl d1,compact_sp_offset_1(sp)
+
+	movl heap_vector_compact_sp_offset(sp),d1
+
+/ follow the update chain of argument-part pointers; entries whose
+/ mark bit is set are themselves links to more references
+update_up_list_1r:
+	movl d0,a1
+	addl neg_heap_p3_compact_sp_offset(sp),d0
+
+#ifdef NO_BIT_INSTRUCTIONS
+	pushl a0
+	movl d0,a0
+
+	shrl $7,d0
+	andl $31*4,a0
+
+	movl bit_set_table(,a0,1),a0
+	movl (d1,d0,4),d0
+
+	andl a0,d0
+
+	popl a0
+	je copy_argument_part_1r
+#else
+	shrl $2,d0
+	bt d0,(d1)
+	jnc copy_argument_part_1r
+#endif
+	movl (a1),d0
+	movl a4,(a1)
+	subl $3,d0
+	jmp update_up_list_1r
+
+copy_argument_part_1r:
+	movl (a1),d0
+	movl a4,(a1)
+	movl d0,(a4)
+	addl $4,a4
+
+/ clear the argument part's own mark bit; it may live in the next
+/ vector word, in which case advance the scan word first
+	movl neg_heap_p3_compact_sp_offset(sp),d0
+	addl a0,d0
+	shr $2,d0
+
+	mov d0,d1
+	andl $31,d1
+	cmp $1,d1
+	jae bit_in_this_word
+
+	movl vector_counter_compact_sp_offset(sp),d0
+	movl vector_p_compact_sp_offset(sp),a1
+	dec d0
+	movl (a1),a3
+	addl $4,a1
+
+	movl neg_heap_vector_plus_4_compact_sp_offset(sp),a2
+	addl a1,a2
+	shl $5,a2
+	addl heap_p3_compact_sp_offset(sp),a2
+
+	movl a1,vector_p_compact_sp_offset(sp)
+	movl d0,vector_counter_compact_sp_offset(sp)
+
+bit_in_this_word:
+#ifdef NO_BIT_INSTRUCTIONS
+	andl bit_clear_table(,d1,4),a3
+#else
+	btr d1,a3
+#endif
+
+	movl compact_sp_offset_1(sp),d1
+
+/ copy the unboxed record arguments verbatim
+copy_b_record_argument_part_arguments:
+	movl (a0),d0
+	addl $4,a0
+	movl d0,(a4)
+	addl $4,a4
+	subl $1,d1
+	jnc copy_b_record_argument_part_arguments
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ record with a1+2 pointer arguments and the rest unboxed
+copy_record_arguments_aa:
+	subl $256+2,d1
+	subl a1,d1
+
+	movl d1,compact_sp_offset_1(sp)
+	mov a1,compact_sp_offset_2(sp)
+
+update_up_list_2r:
+	movl d0,a1
+	movl (a1),d0
+	movl $3,d1
+	andl d0,d1
+	subl $3,d1
+	jne copy_argument_part_2r
+
+	movl a4,(a1)
+	subl $3,d0
+	jmp update_up_list_2r
+
+copy_argument_part_2r:
+	movl a4,(a1)
+	cmpl a0,d0
+	jb copy_record_argument_2
+
+	cmpl end_heap_p3_compact_sp_offset(sp),d0
+	jae copy_record_argument_2
+
+/ forward reference into the heap: leave a tagged forwarding pointer
+	movl d0,a1
+	movl (a1),d0
+	lea 1(a4),d1
+	movl d1,(a1)
+copy_record_argument_2:
+	movl d0,(a4)
+	addl $4,a4
+
+	movl compact_sp_offset_2(sp),d1
+	subl $1,d1
+	jc no_pointers_in_record
+
+copy_record_pointers:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jb copy_record_pointers_2
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jae copy_record_pointers_2
+
+	movl (a1),d0
+	inc a4
+	movl a4,(a1)
+	dec a4
+	movl d0,a1
+copy_record_pointers_2:
+	movl a1,(a4)
+	addl $4,a4
+	subl $1,d1
+	jnc copy_record_pointers
+
+no_pointers_in_record:
+	movl compact_sp_offset_1(sp),d1
+
+	subl $1,d1
+	jc no_non_pointers_in_record
+
+copy_non_pointers_in_record:
+	movl (a0),d0
+	addl $4,a0
+	movl d0,(a4)
+	addl $4,a4
+	subl $1,d1
+	jnc copy_non_pointers_in_record
+
+no_non_pointers_in_record:
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ plain hnf with d1-3 boxed arguments in the argument part
+no_record_arguments:
+	subl $3,d1
+update_up_list_2:
+	movl d0,a1
+	movl (d0),d0
+	inc d0
+	movl a4,(a1)
+	testb $3,d0b
+	jne copy_argument_part_2
+
+	subl $4,d0
+	jmp update_up_list_2
+
+copy_argument_part_2:
+	dec d0
+	cmpl a0,d0
+	jc copy_arguments_1
+
+	cmpl end_heap_p3_compact_sp_offset(sp),d0
+	jnc copy_arguments_1
+
+	movl d0,a1
+	movl (d0),d0
+	inc a4
+	movl a4,(a1)
+	dec a4
+copy_arguments_1:
+	movl d0,(a4)
+	addl $4,a4
+
+copy_argument_part_arguments:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc copy_arguments_2
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc copy_arguments_2
+
+	movl (a1),d0
+	inc a4
+	movl a4,(a1)
+	dec a4
+	movl d0,a1
+copy_arguments_2:
+	movl a1,(a4)
+	addl $4,a4
+	subl $1,d1
+	jnc copy_argument_part_arguments
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ Resolve the backward-pointer chain of a marked node: every link
+/ (odd-tagged word) is replaced by the node's new address a4; the
+/ final untagged word is the real descriptor, which is then dispatched
+/ on below (record / hnf_0..3 / lazy).
+update_list_2_:
+	dec d0
+update_list_2:
+	movl a4,(a1)
+begin_update_list_2:
+	movl d0,a1
+	movl (d0),d0
+update_list__2:
+	test $1,d0
+	jz end_update_list_2
+	test $2,d0
+	jz update_list_2_
+	lea -3(d0),a1
+	movl -3(d0),d0
+	jmp update_list__2
+
+end_update_list_2:
+	movl a4,(a1)
+
+	movl d0,(a4)
+	addl $4,a4
+
+/ dispatch on the descriptor: bit 1 clear = lazy node
+	testb $2,d0b
+	je move_lazy_node
+
+	movzwl -2(d0),d1
+	testl d1,d1
+	je move_hnf_0
+
+	cmp $256,d1
+	jae move_record
+
+	subl $2,d1
+	jc move_hnf_1
+	je move_hnf_2
+
+/ hnf with 3+ arguments: first pointer inline, second points to the
+/ argument part (handled via the tagged forwarding scheme)
+move_hnf_3:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_hnf_3_1
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_hnf_3_1
+
+	lea 1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_hnf_3_1:
+	movl a1,(a4)
+
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_hnf_3_2
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_hnf_3_2
+
+	lea 4+2+1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_hnf_3_2:
+	movl a1,4(a4)
+	addl $8,a4
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ hnf with exactly 2 pointer arguments, both inline
+move_hnf_2:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_hnf_2_1
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_hnf_2_1
+
+	lea 1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_hnf_2_1:
+	movl a1,(a4)
+
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_hnf_2_2
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_hnf_2_2
+
+	lea 4+1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_hnf_2_2:
+	movl a1,4(a4)
+	addl $8,a4
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ hnf with exactly 1 pointer argument
+move_hnf_1:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_hnf_1_
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_hnf_1_
+
+	lea 1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_hnf_1_:
+	movl a1,(a4)
+	addl $4,a4
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ record node: arity word 256+n; dispatch on total size n
+move_record:
+	subl $258,d1
+	jb move_record_1
+	je move_record_2
+
+move_record_3:
+	movzwl -2+2(d0),d1
+	subl $1,d1
+	ja move_hnf_3
+
+	movl (a0),a1
+	lea 4(a0),a0
+	jb move_record_3_1b
+
+move_record_3_1a:
+	cmpl a0,a1
+	jb move_record_3_1b
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jae move_record_3_1b
+
+	lea 1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_record_3_1b:
+	movl a1,(a4)
+	addl $4,a4
+
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jb move_record_3_2
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jae move_record_3_2
+
+/ the second word points to an argument part; transfer its mark bit
+/ from the old position (a1+4) to the new position (a4) in the vector
+	movl neg_heap_p3_compact_sp_offset(sp),d0
+#ifdef NO_BIT_INSTRUCTIONS
+	movl a2,compact_sp_offset_1(sp)
+#endif
+	addl a1,d0
+
+#ifdef NO_BIT_INSTRUCTIONS
+	movl heap_vector_compact_sp_offset(sp),d1
+	addl $4,d0
+	movl d0,a2
+	andl $31*4,a2
+	shrl $7,d0
+	movl bit_set_table(a2),a2
+	testl (d1,d0,4),a2
+	je not_linked_record_argument_part_3_b
+#else
+	shr $2,d0
+	inc d0
+
+	movl heap_vector_compact_sp_offset(sp),d1
+	bts d0,(d1)
+	jnc not_linked_record_argument_part_3_b
+#endif
+
+	movl neg_heap_p3_compact_sp_offset(sp),d0
+	addl a4,d0
+
+#ifdef NO_BIT_INSTRUCTIONS
+	movl d0,a2
+	andl $31*4,a2
+	shrl $7,d0
+	movl bit_set_table(a2),a2
+	orl a2,(d1,d0,4)
+	movl compact_sp_offset_1(sp),a2
+#else
+	shr $2,d0
+	bts d0,(d1)
+#endif
+	jmp linked_record_argument_part_3_b
+
+not_linked_record_argument_part_3_b:
+#ifdef NO_BIT_INSTRUCTIONS
+	orl a2,(d1,d0,4)
+#endif
+	movl neg_heap_p3_compact_sp_offset(sp),d0
+	addl a4,d0
+
+#ifdef NO_BIT_INSTRUCTIONS
+	movl d0,a2
+	andl $31*4,a2
+	shrl $7,d0
+	movl bit_clear_table(a2),a2
+	andl a2,(d1,d0,4)
+	movl compact_sp_offset_1(sp),a2
+#else
+	shr $2,d0
+	btr d0,(d1)
+#endif
+
+linked_record_argument_part_3_b:
+	movl (a1),d1
+	lea 2+1(a4),d0
+	movl d0,(a1)
+	movl d1,a1
+move_record_3_2:
+	movl a1,(a4)
+	addl $4,a4
+
+/ drop the stale mark bit just behind the scan position; it may sit
+/ in the next vector word (bit_in_next_word)
+	movl neg_heap_p3_compact_sp_offset(sp),d1
+	addl a0,d1
+	shr $2,d1
+	dec d1
+	andl $31,d1
+	cmp $2,d1
+	jb bit_in_next_word
+
+#ifdef NO_BIT_INSTRUCTIONS
+	andl bit_clear_table(,d1,4),a3
+#else
+	btr d1,a3
+#endif
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+bit_in_next_word:
+	movl vector_counter_compact_sp_offset(sp),d0
+	movl vector_p_compact_sp_offset(sp),a0
+	dec d0
+	movl (a0),a3
+	addl $4,a0
+
+#ifdef NO_BIT_INSTRUCTIONS
+	andl bit_clear_table(,d1,4),a3
+#else
+	btr d1,a3
+#endif
+	testl a3,a3
+	je skip_zeros
+	jmp end_skip_zeros
+
+/ record of size 2: second arity word selects 2 pointers (move_hnf_2),
+/ 0 pointers (real/file payload) or 1 pointer + 1 basic word (ab)
+move_record_2:
+	cmpw $1,-2+2(d0)
+	ja move_hnf_2
+	jb move_real_or_file
+
+move_record_2_ab:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jb move_record_2_1
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jae move_record_2_1
+
+	lea 1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_record_2_1:
+	movl a1,(a4)
+	movl (a0),d1
+	addl $4,a0
+	movl d1,4(a4)
+	addl $8,a4
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ record of size 1: one pointer or one basic word
+move_record_1:
+	movzwl -2+2(d0),d1
+	test d1,d1
+	jne move_hnf_1
+	jmp move_int_bool_or_char
+
+/ two payload words (real/file), then fall through for the second word
+move_real_or_file:
+	movl (a0),d0
+	addl $4,a0
+	movl d0,(a4)
+	addl $4,a4
+move_int_bool_or_char:
+	movl (a0),d0
+	addl $4,a0
+	movl d0,(a4)
+	addl $4,a4
+copy_normal_hnf_0:
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ zero-arity hnf: dispatch on the well-known basic-type descriptors
+move_hnf_0:
+	cmpl $INT+2,d0
+	jb move_real_file_string_or_array
+	cmpl $CHAR+2,d0
+	jbe move_int_bool_or_char
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+move_real_file_string_or_array:
+	cmpl $__STRING__+2,d0
+	ja move_real_or_file
+	jne move_array
+
+/ string: length word at (a0); copy (length+3)/4 + 1 words verbatim
+	movl (a0),d0
+	addl $3,d0
+	shr $2,d0
+
+cp_s_arg_lp3:
+	movl (a0),d1
+	addl $4,a0
+	movl d1,(a4)
+	addl $4,a4
+	subl $1,d0
+	jnc cp_s_arg_lp3
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ array: the mark bit after the array header tells where the element
+/ area ends; find it first (possibly in a later vector word)
+move_array:
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_end_array_bit
+#else
+	bsf a3,d1
+	jne end_array_bit
+#endif
+	movl a0,compact_sp_offset_1(sp)
+
+	movl vector_counter_compact_sp_offset(sp),d0
+	movl vector_p_compact_sp_offset(sp),a0
+
+skip_zeros_a:
+	subl $1,d0
+	movl (a0),a3
+	addl $4,a0
+	testl a3,a3
+	je skip_zeros_a
+
+	movl neg_heap_vector_plus_4_compact_sp_offset(sp),a2
+	addl a0,a2
+	movl d0,vector_counter_compact_sp_offset(sp)
+
+	shl $5,a2
+	movl a0,vector_p_compact_sp_offset(sp)
+
+	addl heap_p3_compact_sp_offset(sp),a2
+
+	movl compact_sp_offset_1(sp),a0
+
+#ifdef NO_BIT_INSTRUCTIONS
+/ software bit-scan, as in bsf_and_copy_nodes above
+bsf_and_end_array_bit:
+	movl a3,d0
+	movl a3,a1
+	andl $0xff,d0
+	jne a_found_bit1
+	andl $0xff00,a1
+	jne a_found_bit2
+	movl a3,d0
+	movl a3,a1
+	andl $0xff0000,d0
+	jne a_found_bit3
+	shrl $24,a1
+	movzbl first_one_bit_table(,a1,1),d1
+	addl $24,d1
+	jmp end_array_bit
+a_found_bit3:
+	shrl $16,d0
+	movzbl first_one_bit_table(,d0,1),d1
+	addl $16,d1
+	jmp end_array_bit
+a_found_bit2:
+	shrl $8,a1
+	movzbl first_one_bit_table(,a1,1),d1
+	addl $8,d1
+	jmp end_array_bit
+a_found_bit1:
+	movzbl first_one_bit_table(,d0,1),d1
+
+#else
+	bsf a3,d1
+#endif
+
+end_array_bit:
+#ifdef NO_BIT_INSTRUCTIONS
+	andl bit_clear_table(,d1,4),a3
+#else
+	btr d1,a3
+#endif
+	leal (a2,d1,4),d1
+
+	cmpl d1,a0
+	jne move_a_array
+
+/ unboxed array: size word + element descriptor, then raw words;
+/ the word count depends on the element type
+move_b_array:
+	movl (a0),a1
+	movl a1,(a4)
+	movl 4(a0),d1
+	addl $4,a0
+	movzwl -2(d1),d0
+	addl $4,a4
+	test d0,d0
+	je move_strict_basic_array
+
+	subl $256,d0
+	imull d0,a1
+	movl a1,d0
+	jmp cp_s_arg_lp3
+
+move_strict_basic_array:
+	movl a1,d0
+	cmpl $INT+2,d1
+	je cp_s_arg_lp3
+
+	cmpl $BOOL+2,d1
+	je move_bool_array
+
+/ REAL elements: two words each
+	addl d0,d0
+	jmp cp_s_arg_lp3
+
+/ BOOL elements: one byte each, rounded up to words
+move_bool_array:
+	addl $3,d0
+	shr $2,d0
+	jmp cp_s_arg_lp3
+
+/ array containing pointers: d1 = address of the end-of-array mark,
+/ so (d1-a0)/4 = number of words still to move.  The size word and
+/ element descriptor were swapped behind the mark during the mark
+/ phase and are swapped back here.
+move_a_array:
+	movl d1,a1
+	subl a0,d1
+	shr $2,d1
+
+	pushl a3
+
+	subl $1,d1
+	jb end_array
+
+	movl (a0),a3
+	movl -4(a1),d0
+	movl a3,-4(a1)
+	movl d0,(a4)
+	movl (a1),d0
+	movl 4(a0),a3
+	addl $8,a0
+	movl a3,(a1)
+	movl d0,4(a4)
+	addl $8,a4
+	test d0,d0
+	je st_move_array_lp
+
+/ element descriptor: if pointer count != total size it is a record
+/ array with mixed pointer/basic fields (ab case)
+	movzwl -2+2(d0),a3
+	movzwl -2(d0),d0
+	subl $256,d0
+	cmpl a3,d0
+	je st_move_array_lp
+
+/ mixed record array: reorder each element's fields (via reorder),
+/ then per element move the a3 pointer fields and d0-a3 basic fields.
+/ Loop counters live on the machine stack: (sp)=elements left,
+/ 4(sp)=basic words per element - 1, 8(sp)=pointer words per element - 1.
+move_array_ab:
+	pushl a0
+
+	movl -8(a4),a1
+	movl a3,d1
+	imull d0,a1
+	shl $2,a1
+
+	subl d1,d0
+	addl a0,a1
+	call reorder
+
+	popl a0
+	subl $1,d1
+	subl $1,d0
+
+	pushl d1
+	pushl d0
+	pushl -8(a4)
+	jmp st_move_array_lp_ab
+
+move_array_ab_lp1:
+	movl 8(sp),d0
+move_array_ab_a_elements:
+	movl (a0),d1
+	addl $4,a0
+	cmpl a0,d1
+	jb move_array_element_ab
+
+	cmpl end_heap_p3_compact_sp_offset+16(sp),d1
+	jnc move_array_element_ab
+
+	movl d1,a1
+	movl (a1),d1
+	inc a4
+	movl a4,(a1)
+	dec a4
+move_array_element_ab:
+	movl d1,(a4)
+	addl $4,a4
+	subl $1,d0
+	jnc move_array_ab_a_elements
+
+	movl 4(sp),d0
+move_array_ab_b_elements:
+	movl (a0),d1
+	addl $4,a0
+	movl d1,(a4)
+	addl $4,a4
+	subl $1,d0
+	jnc move_array_ab_b_elements
+
+st_move_array_lp_ab:
+	subl $1,(sp)
+	jnc move_array_ab_lp1
+
+	addl $12,sp
+	jmp end_array
+
+/ pure pointer array: move d1+1 elements, installing forwarding
+/ pointers for forward references as elsewhere
+move_array_lp1:
+	movl (a0),d0
+	addl $4,a0
+	addl $4,a4
+	cmpl a0,d0
+	jb move_array_element
+
+	cmpl end_heap_p3_compact_sp_offset+4(sp),d0
+	jnc move_array_element
+
+	movl (d0),a3
+	movl d0,a1
+	movl a3,-4(a4)
+	leal -4+1(a4),d0
+	movl d0,(a1)
+
+	subl $1,d1
+	jnc move_array_lp1
+
+	jmp end_array
+
+move_array_element:
+	movl d0,-4(a4)
+st_move_array_lp:
+	subl $1,d1
+	jnc move_array_lp1
+
+end_array:
+	popl a3
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ lazy (unevaluated closure) node: the word at descriptor-4 holds the
+/ argument count; >=256 encodes a closure with unboxed arguments
+/ (count = boxed + 256*unboxed, see move_closure_with_unboxed_arguments)
+move_lazy_node:
+	movl d0,a1
+	movl -4(a1),d1
+	test d1,d1
+	je move_lazy_node_0
+
+	subl $1,d1
+	jle move_lazy_node_1
+
+	cmpl $256,d1
+	jge move_closure_with_unboxed_arguments
+
+/ move d1+1 boxed arguments, forwarding forward references
+move_lazy_node_arguments:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_lazy_node_arguments_
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_lazy_node_arguments_
+
+	movl (a1),d0
+	movl d0,(a4)
+	lea 1(a4),d0
+	addl $4,a4
+	movl d0,(a1)
+	subl $1,d1
+	jnc move_lazy_node_arguments
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+move_lazy_node_arguments_:
+	movl a1,(a4)
+	addl $4,a4
+	subl $1,d1
+	jnc move_lazy_node_arguments
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ one argument: move it, node still occupies two argument words
+move_lazy_node_1:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_lazy_node_1_
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_lazy_node_1_
+
+	lea 1(a4),d0
+	movl (a1),d1
+	movl d0,(a1)
+	movl d1,a1
+move_lazy_node_1_:
+	movl a1,(a4)
+	addl $8,a4
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ zero arguments: node body is two (unused) words
+move_lazy_node_0:
+	addl $8,a4
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ closure with unboxed arguments: low byte = total args, high byte =
+/ unboxed count; move the boxed ones like pointers, then copy the
+/ unboxed words verbatim
+move_closure_with_unboxed_arguments:
+	je move_closure_with_unboxed_arguments_1
+	addl $1,d1
+	movl d1,d0
+	andl $255,d1
+	shrl $8,d0
+	subl d0,d1
+	je move_non_pointers_of_closure
+
+	movl d0,compact_sp_offset_1(sp)
+
+move_closure_with_unboxed_arguments_lp:
+	movl (a0),a1
+	addl $4,a0
+	cmpl a0,a1
+	jc move_closure_with_unboxed_arguments_
+
+	cmpl end_heap_p3_compact_sp_offset(sp),a1
+	jnc move_closure_with_unboxed_arguments_
+
+	movl (a1),d0
+	movl d0,(a4)
+	lea 1(a4),d0
+	addl $4,a4
+	movl d0,(a1)
+	subl $1,d1
+	jne move_closure_with_unboxed_arguments_lp
+
+	movl compact_sp_offset_1(sp),d0
+	jmp move_non_pointers_of_closure
+
+move_closure_with_unboxed_arguments_:
+	movl a1,(a4)
+	addl $4,a4
+	subl $1,d1
+	jne move_closure_with_unboxed_arguments_lp
+
+	movl compact_sp_offset_1(sp),d0
+
+move_non_pointers_of_closure:
+	movl (a0),d1
+	addl $4,a0
+	movl d1,(a4)
+	addl $4,a4
+	subl $1,d0
+	jne move_non_pointers_of_closure
+
+#ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+#else
+	bsf a3,d1
+	jne copy_nodes
+#endif
+	jmp find_non_zero_long
+
+/ special case: 1 unboxed argument, no boxed ones
+move_closure_with_unboxed_arguments_1:
+	movl (a0),d0
+	movl d0,(a4)
+	addl $8,a4
+# ifdef NO_BIT_INSTRUCTIONS
+	test a3,a3
+	jne bsf_and_copy_nodes
+# else
+	bsf a3,d1
+	jne copy_nodes
+# endif
+	jmp find_non_zero_long
+
+/ end of the compaction scan: restore finalizer descriptors that were
+/ temporarily replaced by kFinalizerGCTemp, put the new heap top in
+/ a2, restore the saved a4 and drop the 40-byte scratch frame.
+end_copy:
+
+#ifdef FINALIZERS
+	movl finalizer_list,a0
+
+restore_finalizer_descriptors:
+	cmpl $__Nil-4,a0
+	je end_restore_finalizer_descriptors
+
+	movl $e____system__kFinalizer+2,(a0)
+	movl 4(a0),a0
+	jmp restore_finalizer_descriptors
+
+end_restore_finalizer_descriptors:
+#endif
+
+	movl a4,a2
+	movl a4_compact_sp_offset(sp),a4
+	lea 40(sp),sp
diff --git a/thread/icompact_rmark.s b/thread/icompact_rmark.s
new file mode 100644
index 0000000..f61535f
--- /dev/null
+++ b/thread/icompact_rmark.s
@@ -0,0 +1,955 @@
+
+rmark_stack_nodes1:
+ movl (a0),d1
+ lea 1(a3),d0
+ movl d1,(a3)
+ movl d0,(a0)
+
+rmark_next_stack_node:
+ addl $4,a3
+
+rmark_stack_nodes:
+ cmpl end_vector_offset(a4),a3
+ je end_rmark_nodes
+
+rmark_more_stack_nodes:
+ movl (a3),a0
+ movl neg_heap_p3_offset(a4),d1
+ movl heap_vector_d4_offset(a4),a2
+
+ addl a0,d1
+ cmpl heap_size_32_33_offset(a4),d1
+ jnc rmark_next_stack_node
+
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl a2,d1
+
+ movl bit_set_table(d0),d0
+ movl (,d1,4),a2
+ test d0,a2
+ jne rmark_stack_nodes1
+
+ orl d0,a2
+ movl a2,(,d1,4)
+
+ movl (a0),d0
+ call rmark_stack_node
+
+ addl $4,a3
+ cmpl end_vector_offset(a4),a3
+ jne rmark_more_stack_nodes
+ ret
+
+rmark_stack_node:
+ subl $8,sp
+ movl d0,(a3)
+ lea 1(a3),a2
+ movl a3,4(sp)
+ movl $-1,d1
+ movl $0,(sp)
+ movl a2,(a0)
+ jmp rmark_no_reverse
+
+rmark_node_d1:
+ movl neg_heap_p3_offset(a4),d0
+ movl heap_vector_d4_offset(a4),a2
+ addl a0,d0
+ cmpl heap_size_32_33_offset(a4),d0
+ jnc rmark_next_node
+
+ jmp rmark_node_
+
+rmark_hnf_2:
+ leal 4(a0),d1
+ movl 4(a0),d0
+ subl $8,sp
+
+ movl a0,a3
+ movl (a0),a0
+
+ movl d1,4(sp)
+ movl d0,(sp)
+
+ cmpl end_stack_offset(a4),sp
+ jb rmark_using_reversal
+
+rmark_node:
+ movl neg_heap_p3_offset(a4),d0
+ movl heap_vector_d4_offset(a4),a2
+ addl a0,d0
+ cmpl heap_size_32_33_offset(a4),d0
+ jnc rmark_next_node
+
+ movl a3,d1
+
+rmark_node_:
+ movl $31*4,a1
+ andl d0,a1
+ shrl $7,d0
+ addl a2,d0
+
+ movl bit_set_table(a1),a1
+ movl (,d0,4),a2
+ test a1,a2
+ jne rmark_reverse_and_mark_next_node
+
+ orl a1,a2
+ movl a2,(,d0,4)
+
+ movl (a0),d0
+rmark_arguments:
+ cmpl d1,a0
+ ja rmark_no_reverse
+
+ lea 1(a3),a2
+ movl d0,(a3)
+ movl a2,(a0)
+
+rmark_no_reverse:
+ test $2,d0
+ je rmark_lazy_node
+
+ movzwl -2(d0),a2
+ test a2,a2
+ je rmark_hnf_0
+
+ addl $4,a0
+
+ cmp $256,a2
+ jae rmark_record
+
+ subl $2,a2
+ je rmark_hnf_2
+ jc rmark_hnf_1
+
+rmark_hnf_3:
+ movl 4(a0),a1
+rmark_hnf_3_:
+ cmpl end_stack_offset(a4),sp
+ jb rmark_using_reversal_
+
+ movl neg_heap_p3_offset(a4),d1
+ addl a1,d1
+
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(d0),d0
+ test (,d1,4),d0
+ jne rmark_shared_argument_part
+
+ orl d0,(,d1,4)
+
+rmark_no_shared_argument_part:
+ subl $8,sp
+ movl a0,4(sp)
+ lea 4(a0),a3
+ movl (a0),a0
+ lea (a1,a2,4),a1
+ movl a0,(sp)
+
+rmark_push_hnf_args:
+ movl (a1),d1
+ subl $8,sp
+ movl a1,4(sp)
+ subl $4,a1
+ movl d1,(sp)
+
+ subl $1,a2
+ jg rmark_push_hnf_args
+
+ movl (a1),a0
+
+ cmpl a3,a1
+ ja rmark_no_reverse_argument_pointer
+
+ lea 3(a3),a2
+ movl a0,(a3)
+ movl a2,(a1)
+
+ movl neg_heap_p3_offset(a4),d0
+ movl heap_vector_d4_offset(a4),a2
+ addl a0,d0
+ cmpl heap_size_32_33_offset(a4),d0
+ jnc rmark_next_node
+
+ movl a1,d1
+ jmp rmark_node_
+
+rmark_no_reverse_argument_pointer:
+ movl a1,a3
+ jmp rmark_node
+
+rmark_shared_argument_part:
+ cmpl a0,a1
+ ja rmark_hnf_1
+
+ movl (a1),d1
+ leal 4+2+1(a0),d0
+ movl d0,(a1)
+ movl d1,4(a0)
+ jmp rmark_hnf_1
+
+rmark_record:
+ subl $258,a2
+ je rmark_record_2
+ jb rmark_record_1
+
+rmark_record_3:
+ movzwl -2+2(d0),a2
+ movl 4(a0),a1
+ subl $1,a2
+ jb rmark_record_3_bb
+ je rmark_record_3_ab
+ subl $1,a2
+ je rmark_record_3_aab
+ jmp rmark_hnf_3_
+
+rmark_record_3_bb:
+ subl $4,a0
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a1,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ cmpl a0,a1
+ ja rmark_next_node
+
+ add d0,d0
+ jne rmark_bit_in_same_word1
+ inc a2
+ mov $1,d0
+rmark_bit_in_same_word1:
+ testl (,a2,4),d0
+ je rmark_not_yet_linked_bb
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a0,a2
+
+ addl $2*4,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ movl (a1),a2
+ lea 8+2+1(a0),d0
+ movl a2,8(a0)
+ movl d0,(a1)
+ jmp rmark_next_node
+
+rmark_not_yet_linked_bb:
+ orl d0,(,a2,4)
+ movl (a1),a2
+ lea 8+2+1(a0),d0
+ movl a2,8(a0)
+ movl d0,(a1)
+ jmp rmark_next_node
+
+rmark_record_3_ab:
+ movl neg_heap_p3_offset(a4),a2
+ addl a1,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ cmpl a0,a1
+ ja rmark_hnf_1
+
+ add d0,d0
+ jne rmark_bit_in_same_word2
+ inc a2
+ mov $1,d0
+rmark_bit_in_same_word2:
+ testl (,a2,4),d0
+ je rmark_not_yet_linked_ab
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a0,a2
+
+ addl $4,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ movl (a1),a2
+ lea 4+2+1(a0),d0
+ movl a2,4(a0)
+ movl d0,(a1)
+ jmp rmark_hnf_1
+
+rmark_not_yet_linked_ab:
+ orl d0,(,a2,4)
+ movl (a1),a2
+ lea 4+2+1(a0),d0
+ movl a2,4(a0)
+ movl d0,(a1)
+ jmp rmark_hnf_1
+
+rmark_record_3_aab:
+ cmpl end_stack_offset(a4),sp
+ jb rmark_using_reversal_
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a1,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ testl (,a2,4),d0
+ jne rmark_shared_argument_part
+
+ orl d0,(,a2,4)
+
+ subl $8,sp
+ movl a0,4(sp)
+ lea 4(a0),a3
+ movl (a0),a0
+ movl a0,(sp)
+
+ movl (a1),a0
+
+ cmpl a3,a1
+ ja rmark_no_reverse_argument_pointer
+
+ lea 3(a3),a2
+ movl a0,(a3)
+ movl a2,(a1)
+
+ movl neg_heap_p3_offset(a4),d0
+ movl heap_vector_d4_offset(a4),a2
+ addl a0,d0
+ cmpl heap_size_32_33_offset(a4),d0
+ jnc rmark_next_node
+
+ movl a1,d1
+ jmp rmark_node_
+
+rmark_record_2:
+ cmpw $1,-2+2(d0)
+ ja rmark_hnf_2
+ je rmark_hnf_1
+ jmp rmark_next_node
+
+rmark_record_1:
+ cmpw $0,-2+2(d0)
+ jne rmark_hnf_1
+ jmp rmark_next_node
+
+rmark_lazy_node_1:
+/ selectors:
+ jne rmark_selector_node_1
+
+rmark_hnf_1:
+ movl a0,a3
+ movl (a0),a0
+ jmp rmark_node
+
+/ selectors
+rmark_indirection_node:
+ movl neg_heap_p3_offset(a4),a1
+
+ subl $4,a0
+
+ addl a0,a1
+
+ movl $31*4,a2
+ andl a1,a2
+ shrl $7,a1
+ addl heap_vector_d4_offset(a4),a1
+
+ movl bit_clear_table(a2),a2
+ andl a2,(,a1,4)
+
+ movl a0,a1
+ cmpl d1,a0
+ movl 4(a0),a0
+ movl a0,(a3)
+ ja rmark_node_d1
+ movl d0,(a1)
+ jmp rmark_node_d1
+
+rmark_selector_node_1:
+ addl $3,a2
+ je rmark_indirection_node
+
+ movl (a0),a1
+ movl d1,pointer_compare_address
+
+ movl neg_heap_p3_offset(a4),d1
+ addl a1,d1
+
+ addl $1,a2
+ jle rmark_record_selector_node_1
+
+ movl $31*4,a2
+ andl d1,a2
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,a2),a2
+ movl (,d1,4),d1
+ andl a2,d1
+ jne rmark_hnf_1
+
+ movl (a1),d1
+ testb $2,d1b
+ je rmark_hnf_1
+
+ cmpw $2,-2(d1)
+ jbe rmark_small_tuple_or_record
+
+rmark_large_tuple_or_record:
+ movl 8(a1),d1
+ addl neg_heap_p3_offset(a4),d1
+
+ movl $31*4,a2
+ andl d1,a2
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,a2),a2
+ movl (,d1,4),d1
+ andl a2,d1
+ jne rmark_hnf_1
+
+ movl neg_heap_p3_offset(a4),d1
+ lea -4(a0,d1),d1
+
+ pushl a0
+
+ movl -8(d0),d0
+
+ movl $31*4,a0
+ andl d1,a0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(a0),a0
+ andl a0,(,d1,4)
+
+ movzwl 4(d0),d0
+ movl pointer_compare_address,d1
+
+ cmpl $8,d0
+ jl rmark_tuple_or_record_selector_node_2
+ movl 8(a1),a1
+ je rmark_tuple_selector_node_2
+ movl -12(a1,d0),a0
+ pop a1
+ movl a0,(a3)
+ movl $__indirection,-4(a1)
+ movl a0,(a1)
+ jmp rmark_node_d1
+
+rmark_tuple_selector_node_2:
+ movl (a1),a0
+ pop a1
+ movl a0,(a3)
+ movl $__indirection,-4(a1)
+ movl a0,(a1)
+ jmp rmark_node_d1
+
+rmark_record_selector_node_1:
+ je rmark_strict_record_selector_node_1
+
+ movl $31*4,a2
+ andl d1,a2
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,a2),a2
+ movl (,d1,4),d1
+ andl a2,d1
+ jne rmark_hnf_1
+
+ movl (a1),d1
+ testb $2,d1b
+ je rmark_hnf_1
+
+ cmpw $258,-2(d1)
+ jbe rmark_small_tuple_or_record
+
+ movl 8(a1),d1
+ addl neg_heap_p3_offset(a4),d1
+
+ movl $31*4,a2
+ andl d1,a2
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,a2),a2
+ movl (,d1,4),d1
+ andl a2,d1
+ jne rmark_hnf_1
+
+rmark_small_tuple_or_record:
+ movl neg_heap_p3_offset(a4),d1
+ lea -4(a0,d1),d1
+
+ pushl a0
+
+ movl -8(d0),d0
+
+ movl $31*4,a0
+ andl d1,a0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(a0),a0
+ andl a0,(,d1,4)
+
+ movzwl 4(d0),d0
+ movl pointer_compare_address,d1
+
+ cmpl $8,d0
+ jle rmark_tuple_or_record_selector_node_2
+ movl 8(a1),a1
+ subl $12,d0
+rmark_tuple_or_record_selector_node_2:
+ movl (a1,d0),a0
+ pop a1
+ movl a0,(a3)
+ movl $__indirection,-4(a1)
+ movl a0,(a1)
+ jmp rmark_node_d1
+
+rmark_strict_record_selector_node_1:
+ movl $31*4,a2
+ andl d1,a2
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,a2),a2
+ movl (,d1,4),d1
+ andl a2,d1
+ jne rmark_hnf_1
+
+ movl (a1),d1
+ testb $2,d1b
+ je rmark_hnf_1
+
+ cmpw $258,-2(d1)
+ jbe rmark_select_from_small_record
+
+ movl 8(a1),d1
+ addl neg_heap_p3_offset(a4),d1
+
+ movl $31*4,a2
+ andl d1,a2
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(a2),a2
+ movl (,d1,4),d1
+ andl a2,d1
+ jne rmark_hnf_1
+
+rmark_select_from_small_record:
+ movl -8(d0),d1
+ subl $4,a0
+
+ cmpl pointer_compare_address,a0
+ ja rmark_selector_pointer_not_reversed
+
+ movzwl 4(d1),d0
+ cmpl $8,d0
+ jle rmark_strict_record_selector_node_2
+ addl 8(a1),d0
+ movl -12(d0),d0
+ jmp rmark_strict_record_selector_node_3
+rmark_strict_record_selector_node_2:
+ movl (a1,d0),d0
+rmark_strict_record_selector_node_3:
+ movl d0,4(a0)
+
+ movzwl 6(d1),d0
+ testl d0,d0
+ je rmark_strict_record_selector_node_5
+ cmpl $8,d0
+ jle rmark_strict_record_selector_node_4
+ movl 8(a1),a1
+ subl $12,d0
+rmark_strict_record_selector_node_4:
+ movl (a1,d0),d0
+ movl d0,8(a0)
+rmark_strict_record_selector_node_5:
+
+ movl -4(d1),d0
+
+ addl $1,a3
+ movl a3,(a0)
+ movl d0,-1(a3)
+ jmp rmark_next_node
+
+rmark_selector_pointer_not_reversed:
+ movzwl 4(d1),d0
+ cmpl $8,d0
+ jle rmark_strict_record_selector_node_6
+ addl 8(a1),d0
+ movl -12(d0),d0
+ jmp rmark_strict_record_selector_node_7
+rmark_strict_record_selector_node_6:
+ movl (a1,d0),d0
+rmark_strict_record_selector_node_7:
+ movl d0,4(a0)
+
+ movzwl 6(d1),d0
+ testl d0,d0
+ je rmark_strict_record_selector_node_9
+ cmpl $8,d0
+ jle rmark_strict_record_selector_node_8
+ movl 8(a1),a1
+ subl $12,d0
+rmark_strict_record_selector_node_8:
+ movl (a1,d0),d0
+ movl d0,8(a0)
+rmark_strict_record_selector_node_9:
+
+ movl -4(d1),d0
+ movl d0,(a0)
+ jmp rmark_next_node
+
+rmark_reverse_and_mark_next_node:
+ cmpl d1,a0
+ ja rmark_next_node
+
+ movl (a0),d0
+ movl d0,(a3)
+ addl $1,a3
+ movl a3,(a0)
+
+/ a2,d1: free
+
+rmark_next_node:
+ movl (sp),a0
+ movl 4(sp),a3
+ addl $8,sp
+
+ cmpl $1,a0
+ ja rmark_node
+
+rmark_next_node_:
+end_rmark_nodes:
+ ret
+
+rmark_lazy_node:
+ movl -4(d0),a2
+ test a2,a2
+ je rmark_next_node
+
+ addl $4,a0
+
+ subl $1,a2
+ jle rmark_lazy_node_1
+
+ cmpl $255,a2
+ jge rmark_closure_with_unboxed_arguments
+
+rmark_closure_with_unboxed_arguments_:
+ lea (a0,a2,4),a0
+
+rmark_push_lazy_args:
+ movl (a0),d1
+ subl $8,sp
+ movl a0,4(sp)
+ subl $4,a0
+ movl d1,(sp)
+ subl $1,a2
+ jg rmark_push_lazy_args
+
+ movl a0,a3
+ movl (a0),a0
+ cmpl end_stack_offset(a4),sp
+ jae rmark_node
+
+ jmp rmark_using_reversal
+
+rmark_closure_with_unboxed_arguments:
+/ (a_size+b_size)+(b_size<<8)
+/ addl $1,a2
+ movl a2,d0
+ andl $255,a2
+ shrl $8,d0
+ subl d0,a2
+/ subl $1,a2
+ jg rmark_closure_with_unboxed_arguments_
+ je rmark_hnf_1
+ jmp rmark_next_node
+
+rmark_hnf_0:
+ cmpl $INT+2,d0
+ je rmark_int_3
+
+ cmpl $CHAR+2,d0
+ je rmark_char_3
+
+ jb rmark_no_normal_hnf_0
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a0,a2
+
+ movl $31*4,a1
+ andl a2,a1
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_clear_table(a1),a1
+ andl a1,(,a2,4)
+
+ lea ZERO_ARITY_DESCRIPTOR_OFFSET-2(d0),a1
+ movl a1,(a3)
+ cmpl d1,a0
+ ja rmark_next_node
+ movl d0,(a0)
+ jmp rmark_next_node
+
+rmark_int_3:
+ movl 4(a0),a2
+ cmpl $33,a2
+ jnc rmark_next_node
+
+ lea small_integers(,a2,8),a1
+ movl neg_heap_p3_offset(a4),a2
+ movl a1,(a3)
+ addl a0,a2
+
+ movl $31*4,a1
+ andl a2,a1
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_clear_table(a1),a1
+ andl a1,(,a2,4)
+
+ cmpl d1,a0
+ ja rmark_next_node
+ movl d0,(a0)
+ jmp rmark_next_node
+
+rmark_char_3:
+ movzbl 4(a0),a1
+ movl neg_heap_p3_offset(a4),a2
+
+ lea static_characters(,a1,8),a1
+ addl a0,a2
+
+ movl a1,(a3)
+
+ movl $31*4,a1
+ andl a2,a1
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_clear_table(a1),a1
+ andl a1,(,a2,4)
+
+ cmpl d1,a0
+ ja rmark_next_node
+ movl d0,(a0)
+ jmp rmark_next_node
+
+rmark_no_normal_hnf_0:
+ cmpl $__ARRAY__+2,d0
+ jne rmark_next_node
+
+ movl 8(a0),d0
+ test d0,d0
+ je rmark_lazy_array
+
+ movzwl -2+2(d0),a1
+ test a1,a1
+ je rmark_b_array
+
+ movzwl -2(d0),d0
+ test d0,d0
+ je rmark_b_array
+
+ cmpl end_stack_offset(a4),sp
+ jb rmark_array_using_reversal
+
+ subl $256,d0
+ cmpl d0,a1
+ movl a1,d1
+ je rmark_a_record_array
+
+rmark_ab_record_array:
+ movl 4(a0),a1
+ addl $8,a0
+ pushl a0
+
+ imull d0,a1
+ shl $2,a1
+
+ subl d1,d0
+ addl $4,a0
+ addl a0,a1
+ call reorder
+
+ popl a0
+ movl d1,d0
+ imull -4(a0),d0
+ jmp rmark_lr_array
+
+rmark_b_array:
+ movl neg_heap_p3_offset(a4),a2
+ addl a0,a2
+
+ addl $4,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+ jmp rmark_next_node
+
+rmark_a_record_array:
+ movl 4(a0),d0
+ addl $8,a0
+ cmpl $2,d1
+ jb rmark_lr_array
+
+ imull d1,d0
+ jmp rmark_lr_array
+
+rmark_lazy_array:
+ cmpl end_stack_offset(a4),sp
+ jb rmark_array_using_reversal
+
+ movl 4(a0),d0
+ addl $8,a0
+
+rmark_lr_array:
+ movl neg_heap_p3_offset(a4),a1
+ addl a0,a1
+ shrl $2,a1
+ addl d0,a1
+
+ movl $31,d1
+ andl a1,d1
+ shrl $5,a1
+ addl heap_vector_d4_offset(a4),a1
+
+ movl bit_set_table(,d1,4),d1
+ orl d1,(,a1,4)
+
+ cmpl $1,d0
+ jbe rmark_array_length_0_1
+ movl a0,a1
+ lea (a0,d0,4),a0
+
+ movl (a0),d0
+ movl (a1),d1
+ movl d0,(a1)
+ movl d1,(a0)
+
+ movl -4(a0),d0
+ subl $4,a0
+ movl -4(a1),d1
+ subl $4,a1
+ movl d1,(a0)
+ movl d0,(a1)
+ pushl a0
+ movl a1,a3
+ jmp rmark_array_nodes
+
+rmark_array_nodes1:
+ cmpl a3,a0
+ ja rmark_next_array_node
+
+ movl (a0),d1
+ lea 1(a3),d0
+ movl d1,(a3)
+ movl d0,(a0)
+
+rmark_next_array_node:
+ addl $4,a3
+ cmpl (sp),a3
+ je end_rmark_array_node
+
+rmark_array_nodes:
+ movl (a3),a0
+
+ movl neg_heap_p3_offset(a4),d1
+ addl a0,d1
+ cmpl heap_size_32_33_offset(a4),d1
+ jnc rmark_next_array_node
+
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(d0),d0
+ movl (,d1,4),a2
+ test d0,a2
+ jne rmark_array_nodes1
+
+ orl d0,a2
+ movl a2,(,d1,4)
+
+ movl (a0),d0
+ call rmark_array_node
+
+ addl $4,a3
+ cmpl (sp),a3
+ jne rmark_array_nodes
+
+end_rmark_array_node:
+ addl $4,sp
+ jmp rmark_next_node
+
+rmark_array_node:
+ subl $8,sp
+ movl a3,4(sp)
+ movl a3,d1
+ movl $1,(sp)
+ jmp rmark_arguments
+
+rmark_array_length_0_1:
+ lea -8(a0),a0
+ jb rmark_next_node
+
+ movl 12(a0),d1
+ movl 8(a0),a2
+ movl a2,12(a0)
+ movl 4(a0),a2
+ movl a2,8(a0)
+ movl d1,4(a0)
+ addl $4,a0
+ jmp rmark_hnf_1
+
+ .data
+pointer_compare_address: .long 0
+ .text
diff --git a/thread/icompact_rmarkr.s b/thread/icompact_rmarkr.s
new file mode 100644
index 0000000..6488eac
--- /dev/null
+++ b/thread/icompact_rmarkr.s
@@ -0,0 +1,976 @@
+
+rmark_using_reversal:
+ pushl a3
+ pushl a3
+ movl $1,a3
+ jmp rmarkr_node
+
+rmark_using_reversal_:
+ subl $4,a0
+ pushl d1
+ pushl a3
+ cmpl d1,a0
+ ja rmark_no_undo_reverse_1
+ movl a0,(a3)
+ movl d0,(a0)
+rmark_no_undo_reverse_1:
+ movl $1,a3
+ jmp rmarkr_arguments
+
+rmark_array_using_reversal:
+ pushl d1
+ pushl a3
+ cmpl d1,a0
+ ja rmark_no_undo_reverse_2
+ movl a0,(a3)
+ movl $__ARRAY__+2,(a0)
+rmark_no_undo_reverse_2:
+ movl $1,a3
+ jmp rmarkr_arguments
+
+rmarkr_hnf_2:
+ orl $2,(a0)
+ movl 4(a0),a2
+ movl a3,4(a0)
+ leal 4(a0),a3
+ movl a2,a0
+
+rmarkr_node:
+ movl neg_heap_p3_offset(a4),d1
+ movl heap_vector_d4_offset(a4),a2
+ addl a0,d1
+ cmpl heap_size_32_33_offset(a4),d1
+ jnc rmarkr_next_node_after_static
+
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl a2,d1
+
+ movl bit_set_table(d0),d0
+ movl (,d1,4),a2
+ test d0,a2
+ jne rmarkr_next_node
+
+ orl d0,a2
+ movl a2,(,d1,4)
+
+rmarkr_arguments:
+ movl (a0),d0
+ testb $2,d0b
+ je rmarkr_lazy_node
+
+ movzwl -2(d0),a2
+ test a2,a2
+ je rmarkr_hnf_0
+
+ addl $4,a0
+
+ cmp $256,a2
+ jae rmarkr_record
+
+ subl $2,a2
+ je rmarkr_hnf_2
+ jc rmarkr_hnf_1
+
+rmarkr_hnf_3:
+ movl 4(a0),a1
+
+ movl neg_heap_p3_offset(a4),d1
+ addl a1,d1
+
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(d0),d0
+ test (,d1,4),d0
+ jne rmarkr_shared_argument_part
+
+ orl d0,(,d1,4)
+
+rmarkr_no_shared_argument_part:
+ orl $2,(a0)
+ movl a3,4(a0)
+ addl $4,a0
+
+ orl $1,(a1)
+ leal (a1,a2,4),a1
+
+ movl (a1),a2
+ movl a0,(a1)
+ movl a1,a3
+ movl a2,a0
+ jmp rmarkr_node
+
+rmarkr_shared_argument_part:
+ cmpl a0,a1
+ ja rmarkr_hnf_1
+
+ movl (a1),d1
+ leal 4+2+1(a0),d0
+ movl d0,(a1)
+ movl d1,4(a0)
+ jmp rmarkr_hnf_1
+
+rmarkr_record:
+ subl $258,a2
+ je rmarkr_record_2
+ jb rmarkr_record_1
+
+rmarkr_record_3:
+ movzwl -2+2(d0),a2
+ subl $1,a2
+ jb rmarkr_record_3_bb
+ je rmarkr_record_3_ab
+ dec a2
+ je rmarkr_record_3_aab
+ jmp rmarkr_hnf_3
+
+rmarkr_record_3_bb:
+ movl 8-4(a0),a1
+ subl $4,a0
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a1,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ cmpl a0,a1
+ ja rmarkr_next_node
+
+ add d0,d0
+ jne rmarkr_bit_in_same_word1
+ inc a2
+ mov $1,d0
+rmarkr_bit_in_same_word1:
+ testl (,a2,4),d0
+ je rmarkr_not_yet_linked_bb
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a0,a2
+
+ addl $2*4,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ movl (a1),a2
+ lea 8+2+1(a0),d0
+ movl a2,8(a0)
+ movl d0,(a1)
+ jmp rmarkr_next_node
+
+rmarkr_not_yet_linked_bb:
+ orl d0,(,a2,4)
+ movl (a1),a2
+ lea 8+2+1(a0),d0
+ movl a2,8(a0)
+ movl d0,(a1)
+ jmp rmarkr_next_node
+
+rmarkr_record_3_ab:
+ movl 4(a0),a1
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a1,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ cmpl a0,a1
+ ja rmarkr_hnf_1
+
+ add d0,d0
+ jne rmarkr_bit_in_same_word2
+ inc a2
+ mov $1,d0
+rmarkr_bit_in_same_word2:
+ testl (,a2,4),d0
+ je rmarkr_not_yet_linked_ab
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a0,a2
+
+ addl $4,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+
+ movl (a1),a2
+ lea 4+2+1(a0),d0
+ movl a2,4(a0)
+ movl d0,(a1)
+ jmp rmarkr_hnf_1
+
+rmarkr_not_yet_linked_ab:
+ orl d0,(,a2,4)
+ movl (a1),a2
+ lea 4+2+1(a0),d0
+ movl a2,4(a0)
+ movl d0,(a1)
+ jmp rmarkr_hnf_1
+
+rmarkr_record_3_aab:
+ movl 4(a0),a1
+
+ movl neg_heap_p3_offset(a4),a2
+ addl a1,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ testl (,a2,4),d0
+ jne rmarkr_shared_argument_part
+ orl d0,(,a2,4)
+
+ addl $2,(a0)
+ movl a3,4(a0)
+ addl $4,a0
+
+ movl (a1),a3
+ movl a0,(a1)
+ movl a3,a0
+ lea 1(a1),a3
+ jmp rmarkr_node
+
+rmarkr_record_2:
+ cmpw $1,-2+2(d0)
+ ja rmarkr_hnf_2
+ je rmarkr_hnf_1
+ subl $4,a0
+ jmp rmarkr_next_node
+
+rmarkr_record_1:
+ cmpw $0,-2+2(d0)
+ jne rmarkr_hnf_1
+ subl $4,a0
+ jmp rmarkr_next_node
+
+rmarkr_lazy_node_1:
+/ selectors:
+ jne rmarkr_selector_node_1
+
+rmarkr_hnf_1:
+ movl (a0),a2
+ movl a3,(a0)
+
+ leal 2(a0),a3
+ movl a2,a0
+ jmp rmarkr_node
+
+/ selectors
+rmarkr_indirection_node:
+ movl neg_heap_p3_offset(a4),d1
+ leal -4(a0,d1),d1
+
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(d0),d0
+ andl d0,(,d1,4)
+
+ movl (a0),a0
+ jmp rmarkr_node
+
+rmarkr_selector_node_1:
+ addl $3,a2
+ je rmarkr_indirection_node
+
+ movl (a0),a1
+
+ movl neg_heap_p3_offset(a4),d1
+ addl a1,d1
+
+ addl $1,a2
+ jle rmarkr_record_selector_node_1
+
+ push d0
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,d0),d0
+ andl (,d1,4),d0
+ pop d0
+ jne rmarkr_hnf_1
+
+ movl (a1),d1
+ testb $2,d1b
+ je rmarkr_hnf_1
+
+ cmpw $2,-2(d1)
+ jbe rmarkr_small_tuple_or_record
+
+rmarkr_large_tuple_or_record:
+ movl 8(a1),d1
+ addl neg_heap_p3_offset(a4),d1
+
+ push d0
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,d0),d0
+ andl (,d1,4),d0
+ pop d0
+ jne rmarkr_hnf_1
+
+ movl neg_heap_p3_offset(a4),d1
+ lea -4(a0,d1),d1
+
+ push a0
+
+ movl -8(d0),d0
+
+ movl $31*4,a0
+ andl d1,a0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(a0),a0
+ andl a0,(,d1,4)
+
+ movzwl 4(d0),d0
+ cmpl $8,d0
+ jl rmarkr_tuple_or_record_selector_node_2
+ movl 8(a1),a1
+ je rmarkr_tuple_selector_node_2
+ movl -12(a1,d0),a0
+ pop a1
+ movl $__indirection,-4(a1)
+ movl a0,(a1)
+ jmp rmarkr_node
+
+rmarkr_tuple_selector_node_2:
+ movl (a1),a0
+ pop a1
+ movl $__indirection,-4(a1)
+ movl a0,(a1)
+ jmp rmarkr_node
+
+rmarkr_record_selector_node_1:
+ je rmarkr_strict_record_selector_node_1
+
+ push d0
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,d0),d0
+ movl (,d1,4),d1
+ andl d0,d1
+ pop d0
+ jne rmarkr_hnf_1
+
+ movl (a1),d1
+ testb $2,d1b
+ je rmarkr_hnf_1
+
+ cmpw $258,-2(d1)
+ jbe rmarkr_small_tuple_or_record
+
+ movl 8(a1),d1
+ addl neg_heap_p3_offset(a4),d1
+
+ push d0
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,d0),d0
+ andl (,d1,4),d0
+ pop d0
+ jne rmarkr_hnf_1
+
+rmarkr_small_tuple_or_record:
+ movl neg_heap_p3_offset(a4),d1
+ lea -4(a0,d1),d1
+
+ push a0
+
+ movl -8(d0),d0
+
+ movl $31*4,a0
+ andl d1,a0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(a0),a0
+ andl a0,(,d1,4)
+
+ movzwl 4(d0),d0
+ cmpl $8,d0
+ jle rmarkr_tuple_or_record_selector_node_2
+ movl 8(a1),a1
+ subl $12,d0
+rmarkr_tuple_or_record_selector_node_2:
+ movl (a1,d0),a0
+ pop a1
+ movl $__indirection,-4(a1)
+ movl a0,(a1)
+ jmp rmarkr_node
+
+rmarkr_strict_record_selector_node_1:
+ push d0
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(,d0),d0
+ andl (,d1,4),d0
+ pop d0
+ jne rmarkr_hnf_1
+
+ movl (a1),d1
+ testb $2,d1b
+ je rmarkr_hnf_1
+
+ cmpw $258,-2(d1)
+ jbe rmarkr_select_from_small_record
+
+ movl 8(a1),d1
+ addl neg_heap_p3_offset(a4),d1
+
+ push d0
+ movl $31*4,d0
+ andl d1,d0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_set_table(d0),d0
+ andl (,d1,4),d0
+ pop d0
+ jne rmarkr_hnf_1
+
+rmarkr_select_from_small_record:
+ movl -8(d0),d0
+ subl $4,a0
+
+ movzwl 4(d0),d1
+ cmpl $8,d1
+ jle rmarkr_strict_record_selector_node_2
+ addl 8(a1),d1
+ movl -12(d1),d1
+ jmp rmarkr_strict_record_selector_node_3
+rmarkr_strict_record_selector_node_2:
+ movl (a1,d1),d1
+rmarkr_strict_record_selector_node_3:
+ movl d1,4(a0)
+
+ movzwl 6(d0),d1
+ testl d1,d1
+ je rmarkr_strict_record_selector_node_5
+ cmpl $8,d1
+ jle rmarkr_strict_record_selector_node_4
+ movl 8(a1),a1
+ subl $12,d1
+rmarkr_strict_record_selector_node_4:
+ movl (a1,d1),d1
+ movl d1,8(a0)
+rmarkr_strict_record_selector_node_5:
+
+ movl -4(d0),d0
+ movl d0,(a0)
+ jmp rmarkr_next_node
+
+/ a2,d1: free
+
+rmarkr_next_node:
+ test $3,a3
+ jne rmarkr_parent
+
+ movl -4(a3),a2
+ movl $3,d1
+
+ andl a2,d1
+ subl $4,a3
+
+ cmpl $3,d1
+ je rmarkr_argument_part_cycle1
+
+ movl 4(a3),a1
+ movl a1,(a3)
+
+rmarkr_c_argument_part_cycle1:
+ cmpl a3,a0
+ ja rmarkr_no_reverse_1
+
+ movl (a0),a1
+ leal 4+1(a3),d0
+ movl a1,4(a3)
+ movl d0,(a0)
+
+ orl d1,a3
+ movl a2,a0
+ xorl d1,a0
+ jmp rmarkr_node
+
+rmarkr_no_reverse_1:
+ movl a0,4(a3)
+ movl a2,a0
+ orl d1,a3
+ xorl d1,a0
+ jmp rmarkr_node
+
+rmarkr_lazy_node:
+ movl -4(d0),a2
+ test a2,a2
+ je rmarkr_next_node
+
+ addl $4,a0
+
+ subl $1,a2
+ jle rmarkr_lazy_node_1
+
+ cmpl $255,a2
+ jge rmarkr_closure_with_unboxed_arguments
+
+rmarkr_closure_with_unboxed_arguments_:
+ orl $2,(a0)
+ leal (a0,a2,4),a0
+
+ movl (a0),a2
+ movl a3,(a0)
+ movl a0,a3
+ movl a2,a0
+ jmp rmarkr_node
+
+rmarkr_closure_with_unboxed_arguments:
+/ (a_size+b_size)+(b_size<<8)
+/ addl $1,a2
+ movl a2,d0
+ andl $255,a2
+ shrl $8,d0
+ subl d0,a2
+/ subl $1,a2
+ jg rmarkr_closure_with_unboxed_arguments_
+ je rmarkr_hnf_1
+ subl $4,a0
+ jmp rmarkr_next_node
+
+rmarkr_hnf_0:
+ cmpl $INT+2,d0
+ je rmarkr_int_3
+
+ cmpl $CHAR+2,d0
+ je rmarkr_char_3
+
+ jb rmarkr_no_normal_hnf_0
+
+ movl neg_heap_p3_offset(a4),d1
+ addl a0,d1
+
+ movl $31*4,a0
+ andl d1,a0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(a0),a0
+ andl a0,(,d1,4)
+
+ lea ZERO_ARITY_DESCRIPTOR_OFFSET-2(d0),a0
+ jmp rmarkr_next_node_after_static
+
+rmarkr_int_3:
+ movl 4(a0),a2
+ cmpl $33,a2
+ jnc rmarkr_next_node
+
+ movl neg_heap_p3_offset(a4),d1
+ addl a0,d1
+
+ movl $31*4,a0
+ andl d1,a0
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(a0),a0
+ andl a0,(,d1,4)
+
+ lea small_integers(,a2,8),a0
+ jmp rmarkr_next_node_after_static
+
+rmarkr_char_3:
+ movl neg_heap_p3_offset(a4),d1
+
+ movzbl 4(a0),d0
+ addl a0,d1
+
+ movl $31*4,a2
+ andl d1,a2
+ shrl $7,d1
+ addl heap_vector_d4_offset(a4),d1
+
+ movl bit_clear_table(a2),a2
+ andl a2,(,d1,4)
+
+ lea static_characters(,d0,8),a0
+ jmp rmarkr_next_node_after_static
+
+rmarkr_no_normal_hnf_0:
+ cmpl $__ARRAY__+2,d0
+ jne rmarkr_next_node
+
+ movl 8(a0),d0
+ test d0,d0
+ je rmarkr_lazy_array
+
+ movzwl -2+2(d0),d1
+ test d1,d1
+ je rmarkr_b_array
+
+ movzwl -2(d0),d0
+ test d0,d0
+ je rmarkr_b_array
+
+ subl $256,d0
+ cmpl d0,d1
+ je rmarkr_a_record_array
+
+rmarkr_ab_record_array:
+ movl 4(a0),a1
+ addl $8,a0
+ pushl a0
+
+ imull d0,a1
+ shl $2,a1
+
+ subl d1,d0
+ addl $4,a0
+ addl a0,a1
+ call reorder
+
+ popl a0
+ movl d1,d0
+ imull -4(a0),d0
+ jmp rmarkr_lr_array
+
+rmarkr_b_array:
+ movl neg_heap_p3_offset(a4),a2
+ addl a0,a2
+
+ addl $4,a2
+
+ movl $31*4,d0
+ andl a2,d0
+ shrl $7,a2
+ addl heap_vector_d4_offset(a4),a2
+
+ movl bit_set_table(d0),d0
+ orl d0,(,a2,4)
+ jmp rmarkr_next_node
+
+rmarkr_a_record_array:
+ movl 4(a0),d0
+ addl $8,a0
+ cmpl $2,d1
+ jb rmarkr_lr_array
+
+ imull d1,d0
+ jmp rmarkr_lr_array
+
+rmarkr_lazy_array:
+ movl 4(a0),d0
+ addl $8,a0
+
+rmarkr_lr_array:
+ movl neg_heap_p3_offset(a4),a1
+ addl a0,a1
+ shrl $2,a1
+ addl d0,a1
+
+ movl $31,d1
+ andl a1,d1
+ shrl $5,a1
+ addl heap_vector_d4_offset(a4),a1
+
+ movl bit_set_table(,d1,4),d1
+ orl d1,(,a1,4)
+
+ cmpl $1,d0
+ jbe rmarkr_array_length_0_1
+
+ movl a0,a1
+ lea (a0,d0,4),a0
+
+ movl (a0),d0
+ movl (a1),d1
+ movl d0,(a1)
+ movl d1,(a0)
+
+ movl -4(a0),d0
+ subl $4,a0
+ addl $2,d0
+ movl -4(a1),d1
+ subl $4,a1
+ movl d1,(a0)
+ movl d0,(a1)
+
+ movl -4(a0),d0
+ subl $4,a0
+ movl a3,(a0)
+ movl a0,a3
+ movl d0,a0
+ jmp rmarkr_node
+
+rmarkr_array_length_0_1:
+ lea -8(a0),a0
+ jb rmarkr_next_node
+
+ movl 12(a0),d1
+ movl 8(a0),a2
+ movl a2,12(a0)
+ movl 4(a0),a2
+ movl a2,8(a0)
+ movl d1,4(a0)
+ addl $4,a0
+ jmp rmarkr_hnf_1
+
+/ a2: free
+
+rmarkr_parent:
+ movl a3,d1
+ andl $3,d1
+
+ andl $-4,a3
+ je end_rmarkr
+
+ subl $1,d1
+ je rmarkr_argument_part_parent
+
+ movl (a3),a2
+
+ cmpl a3,a0
+ ja rmarkr_no_reverse_2
+
+ movl a0,a1
+ leal 1(a3),d0
+ movl (a1),a0
+ movl d0,(a1)
+
+rmarkr_no_reverse_2:
+ movl a0,(a3)
+ leal -4(a3),a0
+ movl a2,a3
+ jmp rmarkr_next_node
+
+rmarkr_argument_part_parent:
+ movl (a3),a2
+
+ movl a3,a1
+ movl a0,a3
+ movl a1,a0
+
+rmarkr_skip_upward_pointers:
+ movl a2,d0
+ andl $3,d0
+ cmpl $3,d0
+ jne rmarkr_no_upward_pointer
+
+ leal -3(a2),a1
+ movl -3(a2),a2
+ jmp rmarkr_skip_upward_pointers
+
+rmarkr_no_upward_pointer:
+ cmpl a0,a3
+ ja rmarkr_no_reverse_3
+
+ movl a3,d1
+ movl (a3),a3
+ leal 1(a0),d0
+ movl d0,(d1)
+
+rmarkr_no_reverse_3:
+ movl a3,(a1)
+ lea -4(a2),a3
+
+ andl $-4,a3
+
+ movl a3,a1
+ movl $3,d1
+
+ movl (a3),a2
+
+ andl a2,d1
+ movl 4(a1),d0
+
+ orl d1,a3
+ movl d0,(a1)
+
+ cmpl a1,a0
+ ja rmarkr_no_reverse_4
+
+ movl (a0),d0
+ movl d0,4(a1)
+ leal 4+2+1(a1),d0
+ movl d0,(a0)
+ movl a2,a0
+ andl $-4,a0
+ jmp rmarkr_node
+
+rmarkr_no_reverse_4:
+ movl a0,4(a1)
+ movl a2,a0
+ andl $-4,a0
+ jmp rmarkr_node
+
+rmarkr_argument_part_cycle1:
+ movl 4(a3),d0
+ push a1
+
+rmarkr_skip_pointer_list1:
+ movl a2,a1
+ andl $-4,a1
+ movl (a1),a2
+ movl $3,d1
+ andl a2,d1
+ cmpl $3,d1
+ je rmarkr_skip_pointer_list1
+
+ movl d0,(a1)
+ pop a1
+ jmp rmarkr_c_argument_part_cycle1
+
+rmarkr_next_node_after_static:
+ test $3,a3
+ jne rmarkr_parent_after_static
+
+ movl -4(a3),a2
+ movl $3,d1
+
+ andl a2,d1
+ subl $4,a3
+
+ cmpl $3,d1
+ je rmarkr_argument_part_cycle2
+
+ movl 4(a3),d0
+ movl d0,(a3)
+
+rmarkr_c_argument_part_cycle2:
+ movl a0,4(a3)
+ movl a2,a0
+ orl d1,a3
+ xorl d1,a0
+ jmp rmarkr_node
+
+rmarkr_parent_after_static:
+ movl a3,d1
+ andl $3,d1
+
+ andl $-4,a3
+ je end_rmarkr_after_static
+
+ subl $1,d1
+ je rmarkr_argument_part_parent_after_static
+
+ movl (a3),a2
+ movl a0,(a3)
+ leal -4(a3),a0
+ movl a2,a3
+ jmp rmarkr_next_node
+
+rmarkr_argument_part_parent_after_static:
+ movl (a3),a2
+
+ movl a3,a1
+ movl a0,a3
+ movl a1,a0
+
+/ movl (a1),a2
+rmarkr_skip_upward_pointers_2:
+ movl a2,d0
+ andl $3,d0
+ cmpl $3,d0
+ jne rmarkr_no_reverse_3
+
+/ movl a2,a1
+/ andl $-4,a1
+/ movl (a1),a2
+ lea -3(a2),a1
+ movl -3(a2),a2
+ jmp rmarkr_skip_upward_pointers_2
+
+rmarkr_argument_part_cycle2:
+ movl 4(a3),d0
+ push a1
+
+rmarkr_skip_pointer_list2:
+ movl a2,a1
+ andl $-4,a1
+ movl (a1),a2
+ movl $3,d1
+ andl a2,d1
+ cmpl $3,d1
+ je rmarkr_skip_pointer_list2
+
+ movl d0,(a1)
+ pop a1
+ jmp rmarkr_c_argument_part_cycle2
+
+end_rmarkr_after_static:
+ movl (sp),a3
+ addl $8,sp
+ movl a0,(a3)
+ jmp rmarkr_next_stack_node
+
+end_rmarkr:
+ popl a3
+ popl d1
+
+ cmpl d1,a0
+ ja rmark_no_reverse_4
+
+ movl a0,a1
+ leal 1(a3),d0
+ movl (a0),a0
+ movl d0,(a1)
+
+rmark_no_reverse_4:
+ movl a0,(a3)
+
+rmarkr_next_stack_node:
+ cmpl end_stack_offset(a4),sp
+ jae rmark_next_node
+
+ movl (sp),a0
+ movl 4(sp),a3
+ addl $8,sp
+
+ cmpl $1,a0
+ ja rmark_using_reversal
+
+ jmp rmark_next_node_
diff --git a/thread/icopy.s b/thread/icopy.s
new file mode 100644
index 0000000..e9a4b76
--- /dev/null
+++ b/thread/icopy.s
@@ -0,0 +1,1215 @@
+
+/ icopy.s: copying garbage collector, i386. This fragment has no entry
+/ label of its own: it is included into a larger routine and falls
+/ through from the includer. a4 holds the per-thread administration
+/ block (heap_p1_offset etc. are field offsets into it).
+/
+/ Layout of the 44-byte scratch frame built below (plus saved a3 at 44):
+a4_copy_sp_offset = 0
+heap_p1_copy_sp_offset = 4
+heap_p2_copy_sp_offset = 8
+semi_space_size_sp_offset = 12
+stack_p_copy_sp_offset = 16
+heap_copied_vector_copy_sp_offset = 20
+copy_sp_offset_5 = 24
+copy_sp_offset_4 = 28
+copy_sp_offset_3 = 32
+copy_sp_offset_2 = 36
+copy_sp_offset_1 = 40
+
+/ NOTE(review): a3 is presumably the A-stack top pointer of the caller;
+/ it is used below (44(sp)) to size the root scan. Confirm in includer.
+	push a3
+
+	lea -44(sp),sp
+	movl a4,a4_copy_sp_offset(sp)
+
+	movl heap_p1_offset(a4),d0
+	movl d0,heap_p1_copy_sp_offset(sp)
+
+	movl stack_p_offset(a4),d0
+	movl d0,stack_p_copy_sp_offset(sp)
+
+	movl heap_copied_vector_offset(a4),d0
+	movl d0,heap_copied_vector_copy_sp_offset(sp)
+
+/ semi-space size in bytes = heap_size_129 << 6
+	movl heap_size_129_offset(a4),d0
+	shl $6,d0
+
+/ From here on a4 = allocation (free) pointer in the destination
+/ semi-space, growing upward; a3 = its end, used as a downward
+/ allocation pointer for nodes copied to the top of the space.
+	movl heap_p2_offset(a4),a4
+	movl a4,heap_p2_copy_sp_offset(sp)
+
+	movl d0,semi_space_size_sp_offset(sp)
+	lea (a4,d0),a3
+
+#ifdef WRITE_HEAP
+	movl a3,heap2_begin_and_end+4
+#endif
+
+/ Copy the roots of all evaluated CAFs. Each caf_list element:
+/ -4(p) = next element, (p) = number of root words, 4(p) = first root.
+	movl caf_list,d0
+	test d0,d0
+	je end_copy_cafs
+
+copy_cafs_lp:
+	movl -4(d0),a1
+	movl (d0),d1
+	lea 4(d0),a2
+
+	movl a1,copy_sp_offset_1(sp)
+
+	subl $1,d1
+	call copy_lp2
+
+	movl copy_sp_offset_1(sp),d0
+	test d0,d0
+	jne copy_cafs_lp
+
+/ Copy the roots on the A-stack: count = (saved a3 - stack_p) / 4.
+end_copy_cafs:
+	movl 44(sp),d1
+
+	mov stack_p_copy_sp_offset(sp),a2
+	sub a2,d1
+	shr $2,d1
+
+	sub $1,d1
+	jb end_copy0
+	call copy_lp2
+end_copy0:
+/ Start the Cheney-style scan of the destination space: a2 = scan
+/ pointer, a4 = free pointer; the scan ends when they meet.
+	movl heap_p2_copy_sp_offset(sp),a2
+
+	jmp copy_lp1
+/
+/ Copy all referenced nodes to the other semi space
+/
+/ Scan loop: a2 = scan pointer, a4 = free pointer in the destination
+/ space. Each copied node is inspected and its argument references are
+/ copied in turn via copy_lp2 (which copies d1+1 references at a2).
+/ The node descriptor's arity half-word (-2(descriptor)) selects the
+/ layout; values >= 256 denote records -- NOTE(review): exact descriptor
+/ encoding per the Clean RTS; inferred from the comparisons below.
+
+in_hnf_1_2:
+	dec d1
+copy_lp2_lp1:
+	call copy_lp2
+copy_lp1:
+	cmp a4,a2
+	jae end_copy1
+
+	mov (a2),d0
+	add $4,a2
+	testb $2,d0b
+	je not_in_hnf_1
+in_hnf_1:
+	movzwl -2(d0),d1
+
+	test d1,d1
+	je copy_array_21
+
+	cmp $2,d1
+	jbe in_hnf_1_2
+
+	cmp $256,d1
+	jae copy_record_21
+
+/ Arity > 2: first argument is inline, the rest live in a separate
+/ argument part; an odd pointer at 4(a2) marks an already-copied part.
+	mov 4(a2),d0
+	testb $1,d0b
+	jne node_without_arguments_part
+
+	movl d1,copy_sp_offset_2(sp)
+	xorl d1,d1
+
+	call copy_lp2
+
+	movl copy_sp_offset_2(sp),d1
+	add $4,a2
+
+	sub $2,d1
+	jmp copy_lp2_lp1
+
+node_without_arguments_part:
+	dec d0
+	xorl d1,d1
+
+	mov d0,4(a2)
+	call copy_lp2
+
+	add $4,a2
+	jmp copy_lp1
+
+/ Records: d1-256 = total size, second half-word = number of pointers.
+copy_record_21:
+	subl $258,d1
+	ja copy_record_arguments_3
+
+	movzwl -2+2(d0),d1
+	jb copy_record_arguments_1
+
+	subl $1,d1
+	ja copy_lp2_lp1
+	jmp copy_node_arity1
+
+copy_record_arguments_1:
+	dec d1
+	jmp copy_lp2_lp1
+
+copy_record_arguments_3:
+	testb $1,4(a2)
+	jne record_node_without_arguments_part
+
+	movzwl -2+2(d0),a1
+	subl $1,a1
+
+	lea 3*4(a2,d1,4),a0
+	movl a0,copy_sp_offset_2(sp)
+	movl a1,copy_sp_offset_3(sp)
+
+	sub d1,d1
+	call copy_lp2
+
+	addl $4,a2
+	movl copy_sp_offset_3(sp),d1
+	dec d1
+	call copy_lp2
+
+	movl copy_sp_offset_2(sp),a2
+	jmp copy_lp1
+
+record_node_without_arguments_part:
+	andl $-2,4(a2)
+
+	sub d1,d1
+	call copy_lp2
+
+	addl $4,a2
+	jmp copy_lp1
+
+/ Closure (not in head normal form): -4(descriptor) encodes the arity;
+/ >= 257 means some arguments are unboxed (non-pointer) values.
+not_in_hnf_1:
+	mov -4(d0),d1
+	cmpl $257,d1
+	jge copy_unboxed_closure_arguments
+	sub $1,d1
+	jg copy_lp2_lp1
+
+copy_node_arity1:
+	xorl d1,d1
+	call copy_lp2
+
+	add $4,a2
+	jmp copy_lp1
+
+/ Unboxed closure: low byte of d1 = total arity, second byte = number
+/ of non-pointer arguments; only the pointer arguments are copied.
+copy_unboxed_closure_arguments:
+	je copy_unboxed_closure_arguments1
+
+	xorl d0,d0
+	movb d1hb,d0lb
+	andl $255,d1
+	sub d0,d1
+
+	subl $1,d1
+	jl copy_unboxed_closure_arguments_without_pointers
+
+	movl d0,copy_sp_offset_2(sp)
+	call copy_lp2
+	movl copy_sp_offset_2(sp),d0
+
+copy_unboxed_closure_arguments_without_pointers:
+	lea (a2,d0,4),a2
+	jmp copy_lp1
+
+copy_unboxed_closure_arguments1:
+	addl $8,a2
+	jmp copy_lp1
+
+/ Arrays: 4(a2) = element descriptor (0 = boxed untyped array); for
+/ record elements only the pointer fields of each element are scanned.
+copy_array_21:
+	movl 4(a2),d1
+	addl $8,a2
+	test d1,d1
+	je copy_array_21_a
+
+	movzwl -2(d1),d0
+	movzwl -2+2(d1),d1
+	subl $256,d0
+	test d1,d1
+	je copy_array_21_b
+
+	cmpl d0,d1
+	je copy_array_21_r_a
+
+/ Mixed record elements: per element, scan d1 pointers then skip the
+/ (d0-d1) non-pointer words; -8(a2) = element count.
+copy_array_21_ab:
+	cmpl $0,-8(a2)
+	je copy_lp1
+
+	subl d1,d0
+	shl $2,d0
+	subl $1,d1
+
+	movl d1,copy_sp_offset_2(sp)
+	movl d0,copy_sp_offset_3(sp)
+	movl -8(a2),d1
+	subl $1,d1
+	movl d1,copy_sp_offset_4(sp)
+
+copy_array_21_lp_ab:
+	movl copy_sp_offset_2(sp),d1
+	call copy_lp2
+
+	addl copy_sp_offset_3(sp),a2
+	subl $1,copy_sp_offset_4(sp)
+	jnc copy_array_21_lp_ab
+
+	jmp copy_lp1
+
+/ Record elements without pointers: skip the whole payload.
+copy_array_21_b:
+	movl -8(a2),d1
+	imull d0,d1
+	lea (a2,d1,4),a2
+	jmp copy_lp1
+
+/ Record elements that are all pointers: scan every word.
+copy_array_21_r_a:
+	movl -8(a2),d1
+	imull d0,d1
+	subl $1,d1
+	jc copy_lp1
+	jmp copy_lp2_lp1
+
+/ Boxed untyped array: scan all element references.
+copy_array_21_a:
+	movl -8(a2),d1
+	subl $1,d1
+	jc copy_lp1
+	jmp copy_lp2_lp1
+
+/
+/ Copy nodes to the other semi-space
+/
+/ copy_lp2: copy d1+1 node references starting at (a2), advancing a2 by
+/ 4 per reference. a4 = forward free pointer, a3 = backward free pointer
+/ (reals/strings/basic nodes are allocated from the top). A node that
+/ has been copied holds a forwarding pointer with the low bit set (note
+/ the "inc a4" / "lea 1(...)" tagging below); already_copied_2 strips it.
+
+copy_lp2:
+	movl (a2),a1
+
+/ selectors:
+continue_after_selector_2:
+	movl (a1),a0
+	testb $2,a0b
+	je not_in_hnf_2
+
+in_hnf_2:
+	movzwl -2(a0),d0
+	test d0,d0
+	je copy_arity_0_node2
+
+	cmp $256,d0
+	jae copy_record_2
+
+/ Arity 1..2 are copied inline; arity >= 3 also copies/forwards the
+/ separate argument part (copy_hnf_node2_3).
+	sub $2,d0
+	mov a4,(a2)
+
+	lea 4(a2),a2
+	ja copy_hnf_node2_3
+
+	mov a0,(a4)
+	jb copy_hnf_node2_1
+
+/ Arity 2: copy descriptor + 2 argument words, leave tagged forwarder.
+	inc a4
+	mov 4(a1),a0
+
+	mov a4,(a1)
+	mov 8(a1),d0
+
+	sub $1,d1
+	mov a0,4-1(a4)
+
+	mov d0,8-1(a4)
+	lea 12-1(a4),a4
+
+	jae copy_lp2
+	ret
+
+/ Arity 1: copy descriptor + 1 argument word.
+copy_hnf_node2_1:
+	inc a4
+	mov 4(a1),d0
+
+	sub $1,d1
+	mov a4,(a1)
+
+	mov d0,4-1(a4)
+	lea 8-1(a4),a4
+
+	jae copy_lp2
+	ret
+
+/ Arity >= 3: copy the 3-word head, then the argument part (unless its
+/ first word is already a tagged forwarding pointer).
+copy_hnf_node2_3:
+	mov a0,(a4)
+	inc a4
+
+	mov a4,(a1)
+	mov 4(a1),a0
+
+	mov a0,4-1(a4)
+	mov 8(a1),a0
+
+	add $12-1,a4
+	mov (a0),a1
+
+	testb $1,a1b
+	jne arguments_already_copied_2
+
+	mov a4,-4(a4)
+	add $4,a0
+
+	mov a1,(a4)
+	inc a4
+
+	mov a4,-4(a0)
+	add $4-1,a4
+
+cp_hnf_arg_lp2:
+	mov (a0),a1
+	add $4,a0
+
+	mov a1,(a4)
+	add $4,a4
+
+	dec d0
+	jne cp_hnf_arg_lp2
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+arguments_already_copied_2:
+	mov a1,-4(a4)
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Arity 0: dispatch on the descriptor value itself (INT/CHAR/REAL/FILE/
+/ string/array versus ordinary constructors).
+copy_arity_0_node2:
+	cmp $INT+2,a0
+	jb copy_real_file_or_string_2
+
+	cmp $CHAR+2,a0
+	ja copy_normal_hnf_0_2
+
+	mov 4(a1),d0
+
+	je copy_char_2
+
+	cmp $INT+2,a0
+	jne no_small_int_or_char_2
+
+/ Small ints (0..32) are shared: point at the static small_integers
+/ table instead of copying.
+copy_int_2:
+	cmp $33,d0
+	jae no_small_int_or_char_2
+
+	shl $3,d0
+	add $4,a2
+
+	add $small_integers,d0
+	sub $1,d1
+
+	mov d0,-4(a2)
+	jae copy_lp2
+
+	ret
+
+/ Characters are shared via the static_characters table.
+copy_char_2:
+	andl $255,d0
+
+	shl $3,d0
+	add $4,a2
+
+	add $static_characters,d0
+	sub $1,d1
+
+	mov d0,-4(a2)
+	jae copy_lp2
+	ret
+
+no_small_int_or_char_2:
+
+/ 2-word basic node copied to the top of the space (a3 grows downward);
+/ the forwarder stored in the old node carries the +1 tag.
+copy_record_node2_1_b:
+	mov a0,-8(a3)
+	add $4,a2
+
+	mov d0,-4(a3)
+	sub $7,a3
+
+	mov a3,(a1)
+	dec a3
+
+	mov a3,-4(a2)
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Ordinary arity-0 constructor: replace by its shared static node
+/ (descriptor offset ZERO_ARITY_DESCRIPTOR_OFFSET), nothing is copied.
+copy_normal_hnf_0_2:
+	sub $2-ZERO_ARITY_DESCRIPTOR_OFFSET,a0
+	sub $1,d1
+
+	mov a0,(a2)
+	lea 4(a2),a2
+
+	jae copy_lp2
+	ret
+
+copy_real_file_or_string_2:
+	cmpl $__STRING__+2,a0
+	jbe copy_string_or_array_2
+
+/ REAL or FILE node: 3 words, allocated downward from a3.
+copy_real_or_file_2:
+	mov a0,-12(a3)
+	sub $12-1,a3
+
+	mov a3,(a1)
+	dec a3
+
+	mov 4(a1),d0
+	mov 8(a1),a0
+
+	mov a3,(a2)
+	add $4,a2
+
+	mov d0,4(a3)
+	sub $1,d1
+
+	mov a0,8(a3)
+
+	jae copy_lp2
+	ret
+
+/ The node already holds a tagged forwarding pointer: strip the tag.
+already_copied_2:
+	dec a0
+	sub $1,d1
+
+	mov a0,(a2)
+	lea 4(a2),a2
+
+	jae copy_lp2
+	ret
+
+/ Records: arity word d0 has been compared against 256 by the caller.
+/ d0-258: <0 -> size-1 record, =0 -> size-2, >0 -> size>=3 with a
+/ separate argument part. The second half-word of the descriptor is the
+/ number of pointer fields.
+copy_record_2:
+	subl $258,d0
+	ja copy_record_node2_3
+	jb copy_record_node2_1
+
+/ Size 2: a record with no pointers at all is laid out like a REAL.
+	cmpw $0,-2+2(a0)
+	je copy_real_or_file_2
+
+	movl a4,(a2)
+	movl a0,(a4)
+
+	lea 1(a4),a0
+	movl 4(a1),d0
+
+	movl a0,(a1)
+
+	movl d0,4(a4)
+	movl 8(a1),d0
+
+	addl $4,a2
+	movl d0,8(a4)
+
+	addl $12,a4
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Size 1: pointer field -> copy forward; no pointers -> copy to the top.
+copy_record_node2_1:
+	movl 4(a1),d0
+
+	cmpw $0,-2+2(a0)
+	je copy_record_node2_1_b
+
+	movl a4,(a2)
+	movl a0,(a4)
+
+	lea 1(a4),a0
+	movl d0,4(a4)
+
+	movl a0,(a1)
+	addl $4,a2
+
+	addl $8,a4
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Size >= 3: d0 = number of remaining argument words. The pointer count
+/ decides whether the argument part goes forward (a4), backward (a3),
+/ or mixed; the heap_copied bit vector marks argument parts that have
+/ already been moved (see the bit_set_table lookups below).
+copy_record_node2_3:
+	cmpw $1,-2+2(a0)
+	jbe copy_record_node2_3_ab_or_b
+
+	movl d0,copy_sp_offset_5+4(sp)
+	lea 1(a4),d0
+
+	movl d0,(a1)
+	movl 8(a1),d0
+
+	movl a0,(a4)
+	movl 4(a1),a1
+
+	movl a1,4(a4)
+	movl a4,(a2)
+	addl $4,a2
+
+	movl d0,a0
+	testl $1,(d0)
+	jne record_arguments_already_copied_2
+
+	lea 12(a4),a1
+
+	movl copy_sp_offset_5+4(sp),d0
+
+	movl a1,8(a4)
+
+	addl $13,a4
+	movl (a0),a1
+
+	movl a4,(a0)
+	addl $4,a0
+
+	movl a1,-1(a4)
+	addl $3,a4
+
+cp_record_arg_lp2:
+	movl (a0),a1
+	addl $4,a0
+
+	movl a1,(a4)
+	addl $4,a4
+
+	subl $1,d0
+	jne cp_record_arg_lp2
+
+	subl $1,d1
+	jae copy_lp2
+	ret
+
+record_arguments_already_copied_2:
+	movl (a0),a1
+
+	movl copy_sp_offset_5+4(sp),d0
+
+	movl a1,8(a4)
+	addl $12,a4
+
+	subl $1,d1
+	jae copy_lp2
+	ret
+
+copy_record_node2_3_ab_or_b:
+	jb copy_record_node2_3_b
+
+/ One pointer field: head goes forward (a4), argument part backward
+/ (a3); "copied" status is tracked in the heap_copied bit vector,
+/ indexed by the argument part's offset in the source semi-space.
+copy_record_node2_3_ab:
+	movl d0,copy_sp_offset_5+4(sp)
+
+	lea 1(a4),d0
+
+	movl d0,(a1)
+	movl 8(a1),d0
+
+	movl a0,(a4)
+	movl 4(a1),a1
+
+	movl d0,a0
+	subl heap_p1_copy_sp_offset+4(sp),d0
+
+	shr $3,d0
+	movl a1,4(a4)
+
+	mov d0,a1
+	and $31,d0
+
+	shr $3,a1
+	movl a4,(a2)
+
+	andl $-4,a1
+	mov bit_set_table(,d0,4),d0
+
+	addl heap_copied_vector_copy_sp_offset+4(sp),a1
+	addl $4,a2
+
+	test (a1),d0
+	jne record_arguments_already_copied_2
+
+	or d0,(a1)
+	movl copy_sp_offset_5+4(sp),d0
+
+	subl $4,a3
+
+	shl $2,d0
+	subl d0,a3
+
+	movl a3,copy_sp_offset_5+4(sp)
+	addl $1,a3
+
+	movl a3,8(a4)
+	addl $12,a4
+
+	movl (a0),a1
+	jmp cp_record_arg_lp3_c
+
+/ No pointer fields: head and argument part both go backward (a3).
+copy_record_node2_3_b:
+	movl d0,copy_sp_offset_5+4(sp)
+	lea -12+1(a3),d0
+
+	movl d0,(a1)
+	movl 8(a1),d0
+
+	movl a0,-12(a3)
+	movl 4(a1),a1
+
+	movl d0,a0
+	subl heap_p1_copy_sp_offset+4(sp),d0
+
+	shr $3,d0
+	movl a1,-8(a3)
+
+	mov d0,a1
+	and $31,d0
+	subl $12,a3
+
+	shr $3,a1
+	movl a3,(a2)
+
+	andl $-4,a1
+	mov bit_set_table(,d0,4),d0
+
+	addl heap_copied_vector_copy_sp_offset+4(sp),a1
+	addl $4,a2
+
+	test (a1),d0
+	jne record_arguments_already_copied_3_b
+
+	or d0,(a1)
+	movl copy_sp_offset_5+4(sp),d0
+
+	movl a3,a1
+	subl $4,a3
+
+	shl $2,d0
+	subl d0,a3
+
+	movl a3,8(a1)
+
+	movl (a0),a1
+
+	movl a3,copy_sp_offset_5+4(sp)
+	addl $1,a3
+
+cp_record_arg_lp3_c:
+	movl a3,(a0)
+	addl $4,a0
+	movl a1,-1(a3)
+
+	addl $3,a3
+
+/ d0 is a byte count here (it was shifted left by 2), hence "subl $4".
+cp_record_arg_lp3:
+	movl (a0),a1
+	addl $4,a0
+
+	movl a1,(a3)
+	addl $4,a3
+
+	subl $4,d0
+	jne cp_record_arg_lp3
+
+	movl copy_sp_offset_5+4(sp),a3
+
+	subl $1,d1
+	jae copy_lp2
+	ret
+
+record_arguments_already_copied_3_b:
+	movl (a0),a1
+
+	movl d0,copy_sp_offset_5+4(sp)
+
+	subl $1,a1
+	movl a1,8(a3)
+
+	subl $1,d1
+	jae copy_lp2
+	ret
+
+/ Node not in head normal form (a closure). An odd descriptor word is a
+/ forwarding pointer; -4(descriptor) <= 0 selects the special cases
+/ (selectors / indirections / arity-0 closures).
+not_in_hnf_2:
+	testb $1,a0b
+	jne already_copied_2
+
+	mov -4(a0),d0
+	test d0,d0
+	jle copy_arity_0_node2_
+
+copy_node2_1_:
+	andl $255,d0
+	sub $2,d0
+	jl copy_arity_1_node2
+/ Closure with >= 2 arguments: copy descriptor + all argument words.
+copy_node2_3:
+	mov a4,(a2)
+	add $4,a2
+	mov a0,(a4)
+	inc a4
+	mov a4,(a1)
+	mov 4(a1),a0
+	add $8,a1
+	mov a0,4-1(a4)
+	add $8-1,a4
+
+cp_arg_lp2:
+	mov (a1),a0
+	add $4,a1
+	mov a0,(a4)
+	add $4,a4
+	sub $1,d0
+	jae cp_arg_lp2
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Re-entry point for the selector paths: restore d1 first.
+copy_arity_1_node2__:
+	movl copy_sp_offset_5+4(sp),d1
+
+/ Closure with one argument: fixed 3-word node (one word stays unused).
+copy_arity_1_node2:
+copy_arity_1_node2_:
+	mov a4,(a2)
+	inc a4
+
+	add $4,a2
+	mov a4,(a1)
+
+	mov 4(a1),d0
+	mov a0,-1(a4)
+
+	mov d0,4-1(a4)
+	add $12-1,a4
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Indirection node: follow it and copy the target instead. Chains of
+/ indirections (descriptor arity -2) are collapsed: every visited
+/ indirection is updated to point at the final target.
+copy_indirection_2:
+	mov a1,d0
+	mov 4(a1),a1
+
+	mov (a1),a0
+	testb $2,a0b
+	jne in_hnf_2
+
+	testb $1,a0b
+	jne already_copied_2
+
+	cmpl $-2,-4(a0)
+	je skip_indirections_2
+
+	mov -4(a0),d0
+	test d0,d0
+	jle copy_arity_0_node2_
+	jmp copy_node2_1_
+
+skip_indirections_2:
+	mov 4(a1),a1
+
+	mov (a1),a0
+	testb $2,a0b
+	jne update_indirection_list_2
+	testb $1,a0b
+	jne update_indirection_list_2
+
+	cmpl $-2,-4(a0)
+	je skip_indirections_2
+
+/ Short-circuit every indirection on the chain to the final target.
+update_indirection_list_2:
+	lea 4(d0),a0
+	mov 4(d0),d0
+	mov a1,(a0)
+	cmp d0,a1
+	jne update_indirection_list_2
+
+	jmp continue_after_selector_2
+
+/ Selector node (descriptor arity <= 0): if the selected tuple/record is
+/ already evaluated, perform the selection now instead of copying the
+/ selector (turning it into an __indirection to the selected field).
+copy_selector_2:
+	cmpl $-2,d0
+	je copy_indirection_2
+	jl copy_record_selector_2
+
+	mov 4(a1),d0
+
+	movl d1,copy_sp_offset_5+4(sp)
+
+	mov (d0),d1
+	testb $2,d1b
+	je copy_arity_1_node2__
+
+	cmpw $2,-2(d1)
+	jbe copy_selector_2_
+
+	movl 8(d0),d1
+	testb $1,(d1)
+	jne copy_arity_1_node2__
+
+/ -8(a0) = selector descriptor record; 4(...) = field offset.
+	movl -8(a0),a0
+
+	movzwl 4(a0),a0
+	movl $__indirection,(a1)
+
+	cmpl $8,a0
+	jl copy_selector_2_1
+	je copy_selector_2_2
+
+	movl -12(d1,a0),a0
+
+	movl copy_sp_offset_5+4(sp),d1
+
+	movl a0,4(a1)
+	movl a0,a1
+	jmp continue_after_selector_2
+
+copy_selector_2_1:
+	movl 4(d0),a0
+
+	movl copy_sp_offset_5+4(sp),d1
+
+	movl a0,4(a1)
+	movl a0,a1
+	jmp continue_after_selector_2
+
+copy_selector_2_2:
+	movl (d1),a0
+
+	movl copy_sp_offset_5+4(sp),d1
+
+	movl a0,4(a1)
+	movl a0,a1
+	jmp continue_after_selector_2
+
+copy_selector_2_:
+	movl -8(a0),a0
+
+	movl copy_sp_offset_5+4(sp),d1
+
+	movzwl 4(a0),a0
+	movl $__indirection,(a1)
+
+	movl (d0,a0),a0
+	movl a0,4(a1)
+	movl a0,a1
+	jmp continue_after_selector_2
+
+/ Record selector (descriptor arity -3 = strict, < -3 = lazy). When the
+/ record's argument part has already been moved (checked via the
+/ heap_copied bit vector), the selector must be copied as a closure
+/ instead of being evaluated here.
+copy_record_selector_2:
+	cmpl $-3,d0
+	movl 4(a1),d0
+	movl (d0),d0
+	je copy_strict_record_selector_2
+
+	testb $2,d0b
+	je copy_arity_1_node2_
+
+	cmpw $258,-2(d0)
+	jbe copy_record_selector_2_
+
+	cmpw $2,-2+2(d0)
+	jae copy_selector_2__
+
+	movl 4(a1),d0
+	movl a1,copy_sp_offset_5+4(sp)
+
+/ Compute the argument part's bit in the heap_copied vector.
+	movl 8(d0),a1
+	subl heap_p1_copy_sp_offset+4(sp),a1
+
+	mov $31*8,d0
+	and a1,d0
+	shr $6,a1
+
+	shr $1,d0
+	andl $-4,a1
+
+	addl heap_copied_vector_copy_sp_offset+4(sp),a1
+
+	mov bit_set_table(d0),d0
+	andl (a1),d0
+
+	movl copy_sp_offset_5+4(sp),a1
+
+	je copy_record_selector_2_
+	jmp copy_arity_1_node2_
+
+copy_selector_2__:
+	mov 4(a1),d0
+	mov 8(d0),d0
+	testb $1,(d0)
+	jne copy_arity_1_node2_
+
+/ Evaluate the selection: replace the selector node by an __indirection
+/ to the selected record field.
+copy_record_selector_2_:
+	movl -8(a0),d0
+	movl 4(a1),a0
+	movl $__indirection,(a1)
+
+	movzwl 4(d0),d0
+	cmpl $8,d0
+	jle copy_record_selector_3
+	movl 8(a0),a0
+	subl $12,d0
+copy_record_selector_3:
+	movl (a0,d0),a0
+
+	movl a0,4(a1)
+
+	movl a0,a1
+	jmp continue_after_selector_2
+
+/ Strict record selector: like the above, but the selected fields are
+/ written directly into the selector node, which then takes over the
+/ result record's descriptor (-4(d0)).
+copy_strict_record_selector_2:
+	testb $2,d0b
+	je copy_arity_1_node2_
+
+	cmpw $258,-2(d0)
+	jbe copy_strict_record_selector_2_
+
+	cmpw $2,-2+2(d0)
+	jb copy_strict_record_selector_2_b
+
+	movl 4(a1),d0
+	movl 8(d0),d0
+	testb $1,(d0)
+	jne copy_arity_1_node2_
+
+	jmp copy_strict_record_selector_2_
+
+copy_strict_record_selector_2_b:
+	movl 4(a1),d0
+	movl a1,copy_sp_offset_5+4(sp)
+
+	movl 8(d0),d0
+	subl heap_p1_copy_sp_offset+4(sp),d0
+
+	mov d0,a1
+	and $31*8,d0
+
+	shr $6,a1
+
+	shr $1,d0
+	andl $-4,a1
+
+	addl heap_copied_vector_copy_sp_offset+4(sp),a1
+
+	mov bit_set_table(d0),d0
+
+	and (a1),d0
+	movl copy_sp_offset_5+4(sp),a1
+
+	jne copy_arity_1_node2_
+
+copy_strict_record_selector_2_:
+	movl -8(a0),d0
+
+	movl d1,copy_sp_offset_5+4(sp)
+
+	movl 4(a1),a0
+
+	movzwl 4(d0),d1
+	cmpl $8,d1
+	jle copy_strict_record_selector_3
+	addl 8(a0),d1
+	movl -12(d1),d1
+	jmp copy_strict_record_selector_4
+copy_strict_record_selector_3:
+	movl (a0,d1),d1
+copy_strict_record_selector_4:
+	movl d1,4(a1)
+
+	movzwl 6(d0),d1
+	testl d1,d1
+	je copy_strict_record_selector_6
+	cmpl $8,d1
+	jle copy_strict_record_selector_5
+	movl 8(a0),a0
+	subl $12,d1
+copy_strict_record_selector_5:
+	movl (a0,d1),d1
+	movl d1,8(a1)
+copy_strict_record_selector_6:
+
+	movl -4(d0),a0
+	movl a0,(a1)
+
+	movl copy_sp_offset_5+4(sp),d1
+
+/ The result descriptor must be in HNF; hlt traps a corrupted heap.
+	testb $2,a0b
+	jne in_hnf_2
+	hlt
+
+/ Arity-0 closure (d0 <= 0 from not_in_hnf_2): negative values are
+/ selectors; zero-arity closures are copied as 3-word nodes at the top.
+copy_arity_0_node2_:
+	jl copy_selector_2
+
+	mov a0,-12(a3)
+	sub $12,a3
+	mov a3,(a2)
+	lea 1(a3),d0
+
+	add $4,a2
+	mov d0,(a1)
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Strings and arrays. Nodes located outside the source semi-space are
+/ static constants and are referenced, not copied.
+copy_string_or_array_2:
+	movl a1,a0
+	jne copy_array_2
+
+	sub heap_p1_copy_sp_offset+4(sp),a1
+	cmp semi_space_size_sp_offset+4(sp),a1
+	jae copy_string_or_array_constant
+
+/ String: length in bytes at 4(a0); copy rounded up to whole words,
+/ allocating downward from a3.
+	mov 4(a0),a1
+	add $4,a2
+
+	add $3,a1
+
+	movl d1,copy_sp_offset_5+4(sp)
+
+	mov a1,d0
+	and $-4,a1
+
+	shr $2,d0
+	sub a1,a3
+
+	mov (a0),d1
+	add $4,a0
+
+	mov d1,-8(a3)
+	sub $8,a3
+
+	mov a3,-4(a2)
+	lea 1(a3),a1
+
+	mov a1,-4(a0)
+	lea 4(a3),a1
+
+/ Copies d0+1 words from (a0) to (a1).
+cp_s_arg_lp2:
+	mov (a0),d1
+	add $4,a0
+
+	mov d1,(a1)
+	add $4,a1
+
+	subl $1,d0
+	jge cp_s_arg_lp2
+
+	movl copy_sp_offset_5+4(sp),d1
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Array: size at 4(a0), element descriptor at 8(a0). Boxed arrays are
+/ copied forward (a4); strict basic arrays go to the top (a3).
+copy_array_2:
+	sub heap_p1_copy_sp_offset+4(sp),a1
+	cmp semi_space_size_sp_offset+4(sp),a1
+	jae copy_string_or_array_constant
+
+	movl d1,copy_sp_offset_5+4(sp)
+
+	movl 8(a0),d0
+	test d0,d0
+	je copy_array_a2
+
+	movzwl -2(d0),d1
+
+	test d1,d1
+	je copy_strict_basic_array_2
+
+/ Record elements: total words = (record size) * (element count).
+	subl $256,d1
+	imull 4(a0),d1
+	jmp copy_array_a3
+
+copy_array_a2:
+	movl 4(a0),d1
+copy_array_a3:
+	movl a4,a1
+	lea 12(a4,d1,4),a4
+
+	movl a1,(a2)
+	movl (a0),d0
+
+	addl $4,a2
+	movl d0,(a1)
+
+	lea 1(a1),d0
+	addl $4,a1
+
+	movl d0,(a0)
+	addl $4,a0
+
+	lea 1(d1),d0
+	jmp cp_s_arg_lp2
+
+/ Strict basic arrays: word count depends on the element type
+/ (INT = 1 word, BOOL = packed bytes, otherwise REAL = 2 words).
+copy_strict_basic_array_2:
+	movl 4(a0),d1
+	cmpl $INT+2,d0
+	je copy_int_array_2
+
+	cmpl $BOOL+2,d0
+	je copy_bool_array_2
+
+	addl d1,d1
+copy_int_array_2:
+	shl $2,d1
+	lea -12(a3),a1
+
+	subl d1,a1
+	movl (a0),d0
+
+	shr $2,d1
+	movl a1,(a2)
+
+	addl $4,a2
+	movl a1,a3
+
+	movl d0,(a1)
+	lea 1(a1),d0
+
+	addl $4,a1
+	movl d0,(a0)
+
+	addl $4,a0
+	lea 1(d1),d0
+	jmp cp_s_arg_lp2
+
+copy_bool_array_2:
+	add $3,d1
+	shr $2,d1
+	jmp copy_int_array_2
+
+/ Static string/array constant: store the reference unchanged.
+copy_string_or_array_constant:
+	movl a0,(a2)
+	add $4,a2
+
+	sub $1,d1
+	jae copy_lp2
+	ret
+
+/ Scan finished. Optionally sweep the finalizer list (finalizers whose
+/ node was copied, i.e. holds an odd forwarding pointer, stay alive),
+/ then publish the new free pointer and heap bounds back into the
+/ administration block and drop the scratch frame.
+end_copy1:
+#ifdef FINALIZERS
+	movl $finalizer_list,a0
+	movl $free_finalizer_list,a1
+	movl finalizer_list,a2
+
+determine_free_finalizers_after_copy:
+	movl (a2),d0
+	testb $1,d0b
+	je finalizer_not_used_after_copy
+
+	movl 4(a2),a2
+	subl $1,d0
+	movl d0,(a0)
+	lea 4(d0),a0
+	jmp determine_free_finalizers_after_copy
+
+finalizer_not_used_after_copy:
+	cmpl $__Nil-4,a2
+	je end_finalizers_after_copy
+
+	movl a2,(a1)
+	lea 4(a2),a1
+	movl 4(a2),a2
+	jmp determine_free_finalizers_after_copy
+
+end_finalizers_after_copy:
+	movl a2,(a0)
+	movl a2,(a1)
+#endif
+
+/ Keep 32 bytes of slack below the end of the semi-space.
+	lea -32(a3),a1
+
+	movl a4,a2
+	movl a4_copy_sp_offset(sp),a4
+	lea 44(sp),sp
+
+	movl a2,free_heap_offset(a4)
+	movl a3,heap_end_after_gc_offset(a4)
+	movl a1,end_heap_offset(a4)
diff --git a/thread/ifileIO3.s b/thread/ifileIO3.s
new file mode 100644
index 0000000..5027477
--- /dev/null
+++ b/thread/ifileIO3.s
@@ -0,0 +1,738 @@
+
+/ ifileIO3.s: i386 glue between the Clean ABI (args/results in the
+/ registers below plus the C-stack) and the C file-I/O primitives
+/ (@-prefixed symbols). Several entry points return through a saved
+/ return address so they can leave an extra result word on the stack.
+#define d0 %eax
+#define d1 %ebx
+#define a0 %ecx
+#define a1 %edx
+#define a2 %ebp
+#define a3 %esi
+#define a4 %edi
+#define a5 %esp
+#define sp %esp
+
+// # saved registers: %ebx %esi %edi %ebp
+// # d1 a3 a4 a2
+
+	.data
+#if defined (DOS) || defined (_WINDOWS_) || defined (ELF)
+	.align 8
+#else
+	.align 3
+#endif
+
+/ 8-byte scratch slot for passing reals to/from the C primitives.
+tmp_real: .double 0
+freadstring_error:
+	.ascii "Error in freadsubstring parameters."
+	.byte 10,0
+	.byte 0,0,0
+fwritestring_error:
+	.ascii "Error in fwritesubstring parameters."
+	.byte 10,0
+	.byte 0,0
+	.text
+
+	.globl stdioF
+	.globl stderrF
+	.globl openF
+	.globl closeF
+	.globl reopenF
+	.globl readFC
+	.globl readFI
+	.globl readFR
+	.globl readFS
+	.globl readFString
+	.globl readLineF
+	.globl writeFC
+	.globl writeFI
+	.globl writeFR
+	.globl writeFS
+	.globl writeFString
+	.globl endF
+	.globl errorF
+	.globl positionF
+	.globl seekF
+	.globl shareF
+#ifndef LINUX
+	.globl flushF
+#endif
+	.globl openSF
+	.globl readSFC
+	.globl readSFI
+	.globl readSFR
+	.globl readSFS
+	.globl readLineSF
+	.globl endSF
+	.globl positionSF
+	.globl seekSF
+
+// # imports
+
+	.globl @open_file
+	.globl @open_stdio
+	.globl @open_stderr
+	.globl @re_open_file
+	.globl @close_file
+	.globl @file_read_char
+	.globl @file_read_int
+	.globl @file_read_real
+	.globl @file_read_characters
+	.globl @file_read_line
+	.globl @file_write_char
+	.globl @file_write_int
+	.globl @file_write_real
+	.globl @file_write_characters
+	.globl @file_end
+	.globl @file_error
+	.globl @file_position
+	.globl @file_seek
+	.globl @file_share
+#ifndef LINUX
+	.globl @flush_file_buffer
+#endif
+	.globl @open_s_file
+	.globl @file_read_s_char
+	.globl @file_read_s_int
+	.globl @file_read_s_real
+	.globl @file_read_s_string
+	.globl @file_read_s_line
+	.globl @file_s_end
+	.globl @file_s_position
+	.globl @file_s_seek
+
+	.globl collect_0
+	.globl collect_1
+
+	.globl __STRING__
+
+/ stdioF / stderrF: open the standard streams; file handle -> d1,
+/ d0 = -1 -- NOTE(review): presumably the boolean/success result.
+stdioF:	call @open_stdio
+	movl d0,d1
+	movl $-1,d0
+	ret
+
+stderrF: call @open_stderr
+	movl d0,d1
+	movl $-1,d0
+	ret
+
+/ openF: a0 = string node (chars at a0+4), d0 = mode. Result: d1 =
+/ success flag (sign of the handle), d0 = handle. The word under the
+/ return address is replaced by -1 and control returns via the saved
+/ address, leaving an extra result on the stack.
+openF:	pushl d0
+	addl $4,a0
+	pushl a0
+	call @open_file
+	addl $8,sp
+
+	xorl d1,d1
+	testl d0,d0
+	setns %bl
+	movl (sp),a2
+	movl $-1,(sp)
+	jmp *a2
+
+closeF:	pushl d1
+	call @close_file
+	addl $4,sp
+	ret
+
+/ reopenF: d1 = file, one extra argument already on the caller's stack
+/ (hence "addl $8" for a single push).
+reopenF:
+// # popl d0
+// # pushl d0
+	pushl d1
+	call @re_open_file
+	addl $8,sp
+
+	xchg d0,d1
+
+	movl (sp),a2
+	movl $-1,(sp)
+	jmp *a2
+
+/ readFC: read one character from file d1. Results: d0 = char,
+/ d1 = success flag (0 on end of file); the word under the return
+/ address is set to -1 (extra result slot).
+readFC:
+	pushl d1
+
+	pushl d1
+	call @file_read_char
+	addl $4,sp
+
+	movl 4(sp),a2
+	movl $-1,4(sp)
+
+	cmpl $-1,d0
+	je readFC_eof
+
+	movl $1,d1
+	jmp *a2
+
+readFC_eof:
+	xorl d0,d0
+	xorl d1,d1
+	jmp *a2
+
+/ readFI: read an integer; the value is returned through a stack slot
+/ whose address is passed to @file_read_int. d1 = success, d0 = value.
+readFI:
+	pushl d1
+
+	subl $8,sp
+	lea 4(sp),a2
+	movl a2,(sp)
+	pushl d1
+	call @file_read_int
+	addl $8,sp
+
+	movl d0,d1
+	popl d0
+
+	movl 4(sp),a2
+	movl $-1,4(sp)
+	jmp *a2
+
+/ readFR: read a real via the tmp_real scratch slot; result is loaded
+/ onto the x87 stack. finit resets the FPU before the C call.
+readFR:
+	pushl $tmp_real
+	pushl d1
+	finit
+	call @file_read_real
+	addl $8,sp
+
+	fldl tmp_real
+	fstp %st(1)
+
+	xchg d0,d1
+
+	movl (sp),a2
+	movl $-1,(sp)
+	jmp *a2
+
+/ readFString: read d0 characters into string a0 at offset d1 (file on
+/ the caller's stack). Bounds-checks the substring against the string
+/ length at 4(a0); on error prints freadstring_error and aborts.
+readFString:
+	movl 4(a0),a2
+	cmpl a2,d1
+	jae readFString_error
+
+	subl d1,a2
+	cmpl a2,d0
+	ja readFString_error
+
+	movl (sp),a1
+	pushl a0
+
+	pushl d0
+	movl sp,a2
+	lea 8(a0,d1),a0
+
+	pushl a0
+	pushl a2
+	pushl a1
+	call @file_read_characters
+	addl $12+4,sp
+
+	popl a0
+
+	movl d0,d1
+	popl d0
+
+	addl $4,sp
+	popl a2
+	pushl $-1
+	jmp *a2
+
+readFString_error:
+	movl $freadstring_error,a2
+	jmp print_error
+
+/ readFS: read a1 characters (count popped from the stack) from file d1
+/ into a freshly allocated string node; triggers a garbage collection
+/ when the heap (free pointer (a4), end pointer 4(a4)) is too small.
+readFS:	popl a1
+	movl (a4),a2
+	lea 3(a1),d0
+	andl $-4,d0
+	lea -32+8(a2,d0),d0
+	cmpl 4(a4),d0
+	ja readFS_gc
+readFS_r_gc:
+	movl $__STRING__+2,(a2)
+
+	lea 8(a2),d0
+	addl $4,a2
+
+	pushl d0
+	movl a1,(a2)
+	pushl a2
+	pushl d1
+	call @file_read_characters
+	addl $12,sp
+
+/ Shared epilogue: round the byte count d0 up to words, bump the heap
+/ free pointer past the node, a0 = string node.
+readFS_end:
+	lea -4(a2),a0
+
+	addl $3,d0
+	andl $-4,d0
+	lea 4(a2,d0),a2
+	movl a2,(a4)
+
+	movl $-1,d0
+	ret
+
+readFS_gc:	pushl a1
+	movl d0,a2
+	call collect_0l
+	popl a1
+	jmp readFS_r_gc
+
+/ readLineF: read a line from file d1 into a new string node. The C
+/ primitive returns a negative count when the line did not fit in the
+/ remaining heap; the node is then grown after a garbage collection and
+/ reading continues (readLineF_again), copying what was read so far.
+readLineF:
+	movl (a4),a2
+	lea -32+(4*(32+2))(a2),d0
+	cmpl 4(a4),d0
+	ja readLineF_gc
+
+readLineF_r_gc:
+	movl $__STRING__+2,(a2)
+	lea 8(a2),a0
+	addl $4,a2
+
+	pushl a0
+/ Available buffer = (heap end + 32 - 4) - current position.
+	movl 4(a4),a1
+	addl $32-4,a1
+	subl a2,a1
+	pushl a1
+	pushl d1
+	call @file_read_line
+	addl $12,sp
+
+	movl d0,(a2)
+
+	testl d0,d0
+	jns readFS_end
+
+	lea -4(a2),a0
+
+readLineF_again:
+/ Record how many characters are in the partial node, collect, then
+/ copy the partial string into a new, larger node.
+	movl 4(a4),a1
+	addl $32,a1
+	lea -8(a1),d0
+	subl a0,d0
+	movl d0,4(a0)
+	movl a1,(a4)
+
+	lea -32+4*(32+2)(a1,d0),a2
+	call collect_1l
+
+	movl 4(a0),d0
+	lea 8(a0),a1
+
+	pushl a2
+
+	movl $__STRING__+2,(a2)
+
+	lea 3(d0),a0
+	shr $2,a0
+
+	movl d0,4(a2)
+	addl $8,a2
+	jmp st_copy_string1
+
+copy_st_lp1:
+	movl (a1),d0
+	addl $4,a1
+	movl d0,(a2)
+	addl $4,a2
+st_copy_string1:
+	subl $1,a0
+	jnc copy_st_lp1
+
+	pushl a2
+	movl 4(a4),d0
+	addl $32,d0
+	subl a2,d0
+	pushl d0
+	pushl d1
+	call @file_read_line
+	addl $12,sp
+
+	popl a0
+
+	testl d0,d0
+	js readLineF_again
+
+/ Done: add the last chunk's length and finish the node.
+	addl d0,4(a0)
+	addl $3,d0
+	andl $-4,d0
+	addl d0,a2
+	movl a2,(a4)
+
+	movl $-1,d0
+	ret
+
+readLineF_gc:
+	movl d0,a2
+	call collect_0l
+	jmp readLineF_r_gc
+
+/ writeFC / writeFI: write char/int d0 to file d1. The return-address
+/ slot is reused as the second C argument ("movl d0,(sp)"), so the
+/ primitive is called with (value, file); d0 = -1 on return.
+writeFC:
+	movl d0,(sp)
+	pushl d1
+	movl d0,d1
+	call @file_write_char
+	addl $8,sp
+
+	movl $-1,d0
+	ret
+
+writeFI:
+	movl d0,(sp)
+	pushl d1
+	movl d0,d1
+	call @file_write_int
+	addl $8,sp
+
+	movl $-1,d0
+	ret
+
+/ writeFR: real on the x87 stack; spilled to the C stack for the call.
+writeFR:
+	pushl d1
+	subl $8,sp
+	fstpl (sp)
+	finit
+	call @file_write_real
+	addl $12,sp
+
+	movl $-1,d0
+	ret
+
+/ writeFS: write the whole string node a0 (length at 4(a0), chars at
+/ a0+8) to file d1.
+writeFS:
+	pushl d1
+	pushl 4(a0)
+	addl $8,a0
+	pushl a0
+	call @file_write_characters
+	addl $12,sp
+	movl $-1,d0
+	ret
+
+/ writeFString: write d0 characters of string a0 starting at offset d1
+/ (file in the caller's stack slot); bounds-checked like readFString.
+writeFString:
+	movl 4(a0),a2
+	cmpl a2,d1
+	jae writeFString_error
+
+	subl d1,a2
+	cmpl a2,d0
+	ja writeFString_error
+
+	lea 8(a0,d1),a0
+	movl (sp),d1
+
+	pushl d0
+	pushl a0
+	call @file_write_characters
+	addl $12+4,sp
+
+	movl $-1,d0
+
+	ret
+
+writeFString_error:
+	movl $fwritestring_error,a2
+	jmp print_error
+
+/ Simple file queries on file d1. Each returns the primitive's result
+/ in d1, sets the extra stack result slot to -1 and returns through the
+/ saved return address.
+endF:
+	pushl d1
+	call @file_end
+	addl $4,sp
+
+	xchg d0,d1
+
+	movl (sp),a2
+	movl $-1,(sp)
+	jmp *a2
+
+errorF:
+	pushl d1
+	call @file_error
+	addl $4,sp
+
+	xchg d0,d1
+
+	movl (sp),a2
+	movl $-1,(sp)
+	jmp *a2
+
+positionF:
+	pushl d1
+	call @file_position
+	addl $4,sp
+
+	xchg d0,d1
+
+	movl (sp),a2
+	movl $-1,(sp)
+	jmp *a2
+
+/ seekF: two extra arguments (position, mode) are already on the
+/ caller's stack, hence "addl $12" for a single push.
+seekF:
+	pushl d1
+	call @file_seek
+	addl $12,sp
+
+	xchg d0,d1
+
+	movl (sp),a2
+	movl $-1,(sp)
+	jmp *a2
+
+shareF:
+	pushl d1
+	call @file_share
+	addl $4,sp
+
+	movl $-1,d0
+	ret
+
+#ifndef LINUX
+/ flushF: shifts the return address down one slot so the slot above it
+/ can hold the -1 result, then returns normally.
+flushF:
+	pushl d1
+	call @flush_file_buffer
+	movl 4(sp),a2
+	xchg d0,d1
+	movl $-1,4(sp)
+	movl a2,(sp)
+	ret
+#endif
+
+/ "S" variants: operations on seekable/shared files carry an explicit
+/ position in d0 which is passed by reference to the C primitive and
+/ returned updated on the stack / in registers.
+openSF:	pushl d0
+	addl $4,a0
+	pushl a0
+	call @open_s_file
+	addl $8,sp
+
+	xorl d1,d1
+	testl d0,d0
+	setns %bl
+
+	movl (sp),a2
+	movl $0,(sp)
+	jmp *a2
+
+/ readSFC: read a char at position d0 from file d1; the updated
+/ position is left in the stack result slot.
+readSFC:
+	pushl d0
+	movl sp,a2
+	pushl a2
+	pushl d1
+	call @file_read_s_char
+	addl $8,sp
+
+	popl a0
+	popl a2
+
+	pushl a0
+	pushl d1
+
+	cmpl $-1,d0
+	je readSFC_eof
+
+	movl $1,d1
+	jmp *a2
+
+readSFC_eof:
+	xorl d0,d0
+	xorl d1,d1
+	jmp *a2
+
+/ readSFI: read an int; both the value and the updated position are
+/ returned through stack slots.
+readSFI:
+	pushl d0
+	movl sp,a2
+	subl $4,sp
+	pushl a2
+	subl $4,a2
+	pushl a2
+	pushl d1
+	call @file_read_s_int
+	addl $12,sp
+
+	popl a0
+	popl a1
+	popl a2
+
+	pushl a1
+	pushl d1
+	movl d0,d1
+	movl a0,d0
+	jmp *a2
+
+/ readSFR: read a real via tmp_real; updated position via stack slot.
+readSFR:
+	pushl d0
+	movl sp,a2
+	pushl a2
+	pushl $tmp_real
+	pushl d1
+	finit
+	call @file_read_s_real
+	addl $12,sp
+
+	fldl tmp_real
+	xchg d0,d1
+	fstp %st(1)
+
+	popl a0
+	movl (sp),a2
+	movl a0,(sp)
+	jmp *a2
+
+/ readSFS: read a1 characters (count popped from the stack) at
+/ position d0 into a new string node; collects garbage when needed.
+readSFS:
+	popl a1
+	movl (a4),a2
+	lea 3(a1),a0
+	andl $-4,a0
+	lea -32+8(a2,a0),a0
+	cmpl 4(a4),a0
+	ja readSFS_gc
+
+readSFS_r_gc:
+	movl $__STRING__+2,(a2)
+	addl $4,a2
+
+	pushl d0
+	movl sp,a0
+	pushl a0
+	pushl a2
+	pushl a1
+	pushl d1
+	call @file_read_s_string
+	addl $16,sp
+
+/ Shared epilogue (also used by readLineSF): finish the string node and
+/ pop the updated file position into d0.
+readSFS_end:
+	lea -4(a2),a0
+
+	addl $3,d0
+	andl $-4,d0
+	lea 4(a2,d0),a2
+	movl a2,(a4)
+
+	popl d0
+	ret
+
+readSFS_gc:
+	pushl a1
+	movl a0,a2
+	call collect_0l
+	popl a1
+	jmp readSFS_r_gc
+
+/ readLineSF: read a line from seekable file d1 starting at position d0
+/ into a new string node; the updated position is returned via a stack
+/ slot (popped into d0 by readSFS_end). Like readLineF, a negative
+/ count from @file_read_s_line means the line did not fit: the node is
+/ grown after a garbage collection and reading continues.
+readLineSF:
+	movl (a4),a2
+	lea -32+(4*(32+2))(a2),a1
+	cmpl 4(a4),a1
+	ja readLineSF_gc
+
+readLineSF_r_gc:
+	movl $__STRING__+2,(a2)
+	lea 8(a2),a0
+	addl $4,a2
+
+	pushl d0
+	movl sp,a1
+	pushl a1
+	pushl a0
+/ Available buffer = (heap end + 32 - 4) - current position.
+/ BUG FIX: this read the uninitialized word 4(a2) of the new string
+/ node instead of the heap-end pointer 4(a4) (cf. readLineF).
+	movl 4(a4),a1
+	addl $32-4,a1
+	subl a2,a1
+	pushl a1
+	pushl d1
+	call @file_read_s_line
+	addl $16,sp
+
+	movl d0,(a2)
+
+	testl d0,d0
+	jns readSFS_end
+
+	lea -4(a2),a0
+
+readLineSF_again:
+/ Record the partial length, collect, copy the partial string into a
+/ new larger node, then continue reading the line.
+	movl 4(a4),a1
+	addl $32,a1
+	lea -8(a1),d0
+	subl a0,d0
+	movl d0,(a2)
+	movl a1,(a4)
+
+	lea -32+4*(32+2)(a1,d0),a2
+	call collect_1l
+
+	movl 4(a0),d0
+	lea 8(a0),a1
+
+	pushl a2
+
+	movl $__STRING__+2,(a2)
+
+	lea 3(d0),a0
+	shr $2,a0
+
+	movl d0,4(a2)
+	addl $8,a2
+	jmp st_copy_string2
+
+copy_st_lp2:
+	movl (a1),d0
+	addl $4,a1
+	movl d0,(a2)
+	addl $4,a2
+st_copy_string2:
+	subl $1,a0
+	jnc copy_st_lp2
+
+/ 4(sp) still holds the file-position slot pushed in readLineSF_r_gc.
+	lea 4(sp),d0
+	pushl d0
+	pushl a2
+	movl 4(a4),d0
+	addl $32,d0
+	subl a2,d0
+	pushl d0
+	pushl d1
+	call @file_read_s_line
+	addl $16,sp
+
+	popl a0
+
+	testl d0,d0
+	js readLineSF_again
+
+	addl d0,4(a0)
+	addl $3,d0
+	andl $-4,d0
+	addl d0,a2
+	movl a2,(a4)
+
+	popl d0
+	ret
+
+readLineSF_gc:
+	movl a1,a2
+	call collect_0l
+	jmp readLineSF_r_gc
+
+/ endSF / positionSF: query on seekable file d1 at position d0; the
+/ primitive receives (file, position) and its result stays in d0.
+endSF:
+	pushl d0
+	pushl d1
+	call @file_s_end
+	addl $8,sp
+	ret
+
+positionSF:
+	pushl d0
+	pushl d1
+	call @file_s_position
+	addl $8,sp
+
+	ret
+
+/ seekSF: (position, mode) popped from under the return address; the
+/ updated file position is returned via a stack slot and left in the
+/ result slot under the saved return address.
+seekSF:
+	popl a1
+	popl a0
+
+	pushl d0
+	movl sp,a2
+	pushl a2
+	pushl a0
+	pushl a1
+	pushl d1
+	call @file_s_seek
+	addl $16,sp
+
+	popl a0
+
+	xchg d0,d1
+
+	movl (sp),a2
+	movl a0,(sp)
+	jmp *a2
diff --git a/thread/imark.s b/thread/imark.s
new file mode 100644
index 0000000..5db23ec
--- /dev/null
+++ b/thread/imark.s
@@ -0,0 +1,1927 @@
+
+/ imark.s - marking phase of the mark/compact garbage collector.
+/ Register aliases (AT&T/GAS i386): d0,d1 = data registers; a0..a3 =
+/ address registers; a4 points at the per-thread GC administration block
+/ (the *_offset symbols are defined in istartup.s).
+#define d0 %eax
+#define d1 %ebx
+#define a0 %ecx
+#define a1 %edx
+#define a2 %ebp
+#define a3 %esi
+#define a4 %edi
+#define sp %esp
+
+/ heap_vector/4 is cached so that a scaled-index access (,reg,4) on the
+/ sum (heap_vector/4 + word_index) addresses the mark bit vector directly.
+	movl heap_vector_offset(a4),d0
+	shrl $2,d0
+	movl d0,heap_vector_d4_offset(a4)
+
+#undef MARK_USING_REVERSAL
+
+/ heap_size_32_33 = heap_size_33 * 32: byte size used as the in-heap test
+/ bound for (pointer - heap_p3) offsets during marking.
+	movl heap_size_33_offset(a4),d0
+	xorl d1,d1
+
+	movl d1,n_marked_words_offset(a4)
+	shl $5,d0
+
+	movl d0,heap_size_32_33_offset(a4)
+	movl d1,lazy_array_list_offset(a4)
+
+/ Reserve 2000 bytes of C stack headroom; marking falls back to pointer
+/ reversal when sp would cross this limit.
+	lea -2000(sp),a3
+
+	movl caf_list,d0
+
+	movl a3,end_stack_offset(a4)
+
+	test d0,d0
+	je _end_mark_cafs
+
+/ Mark all roots held by CAFs: each caf_list entry appears to hold a
+/ pointer count at (d0) followed by that many node pointers, with the
+/ next-entry link at -4(d0) (NOTE(review): inferred from the loads below).
+_mark_cafs_lp:
+	movl (d0),d1
+	movl -4(d0),a2
+
+	pushl a2
+	lea 4(d0),a2
+	lea 4(d0,d1,4),d0
+	movl d0,end_vector_offset(a4)
+
+	call _mark_stack_nodes
+
+	popl d0
+	test d0,d0
+	jne _mark_cafs_lp
+
+/ Mark everything reachable from the A-stack (stack_p .. stack_top), then
+/ undo the temporary rearrangement that marking applied to lazy arrays
+/ (their descriptor/size words were swapped onto lazy_array_list).
+_end_mark_cafs:
+	movl stack_top_offset(a4),a3
+	movl stack_p_offset(a4),a2
+
+	movl a3,end_vector_offset(a4)
+	call _mark_stack_nodes
+
+	movl lazy_array_list_offset(a4),a0
+
+	test a0,a0
+	je no_restore_arrays
+
+	push a4
+
+restore_arrays:
+/ (a0) holds the saved size; restore the __ARRAY__ descriptor word.
+	movl (a0),d1
+	movl $__ARRAY__+2,(a0)
+
+	cmpl $1,d1
+	je restore_array_size_1
+
+	lea (a0,d1,4),a1
+	movl 8(a1),d0
+	test d0,d0
+	je restore_lazy_array
+
+/ Record array: divide the stored word count by the element size
+/ (halfword at -2+2 of the element descriptor) to recover the length.
+	movl d0,a2
+	push a1
+
+	xorl a1,a1
+	movl d1,d0
+	movzwl -2+2(a2),d1
+
+	div d1
+	movl d0,d1
+
+	pop a1
+	movl a2,d0
+
+restore_lazy_array:
+/ Swap back the size/descriptor words that marking parked at the end of
+/ the array, and follow the list link (a3) afterwards.
+	movl 8(a0),a4
+	movl 4(a0),a2
+	movl d1,4(a0)
+	movl 4(a1),a3
+	movl d0,8(a0)
+	movl a2,4(a1)
+	movl a4,8(a1)
+
+	test d0,d0
+	je no_reorder_array
+
+/ Mixed record arrays were reordered during marking; if the pointer count
+/ (arity-256) differs from the total field count, reorder back.
+	movzwl -2(d0),a1
+	subl $256,a1
+	movzwl -2+2(d0),a2
+	cmpl a1,a2
+	je no_reorder_array
+
+	addl $12,a0
+	imull a1,d1
+	movl a1,d0
+	lea (a0,d1,4),a1
+	movl a2,d1
+	subl a2,d0
+
+	call reorder
+
+no_reorder_array:
+	movl a3,a0
+	testl a0,a0
+	jne restore_arrays
+
+	jmp end_restore_arrays
+
+restore_array_size_1:
+/ Single-element lazy array: its three words were rotated; rotate back.
+	movl 4(a0),a2
+	movl 8(a0),a1
+	movl d1,4(a0)
+	movl 12(a0),d0
+	movl a2,12(a0)
+	movl d0,8(a0)
+
+	movl a1,a0
+	testl a0,a0
+	jne restore_arrays
+
+end_restore_arrays:
+	pop a4
+
+no_restore_arrays:
+
+#ifdef FINALIZERS
+/ Walk finalizer_list: finalizers whose node's mark bit is clear are
+/ unlinked and moved to free_finalizer_list (their objects died in this
+/ collection); marked ones stay. a0 tracks the previous link field.
+	movl $finalizer_list,a0
+	movl $free_finalizer_list,a1
+
+	movl (a0),a2
+determine_free_finalizers_after_mark:
+	cmpl $__Nil-4,a2
+	je end_finalizers_after_mark
+
+/ Test the mark bit of node a2: word (offset>>7), bit mask from
+/ bit_set_table[(offset & 31*4)].
+	movl neg_heap_p3_offset(a4),d1
+	addl a2,d1
+	movl $31*4,d0
+	andl d1,d0
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+	movl bit_set_table(d0),a3
+	testl (,d1,4),a3
+	je finalizer_not_used_after_mark
+
+	lea 4(a2),a0
+	movl 4(a2),a2
+	jmp determine_free_finalizers_after_mark
+
+finalizer_not_used_after_mark:
+/ Dead: append to the free list and splice out of finalizer_list.
+	movl a2,(a1)
+	lea 4(a2),a1
+
+	movl 4(a2),a2
+	movl a2,(a0)
+	jmp determine_free_finalizers_after_mark
+
+end_finalizers_after_mark:
+	movl a2,(a1)
+#endif
+
+	call add_garbage_collect_time
+
+#ifndef ADJUST_HEAP_SIZE
+	movl heap_size_33_offset(a4),d0
+	shl $3,d0
+#else
+/ Adaptive sizing: candidate size = (allocated + marked words) * 4 bytes
+/ scaled by heap_size_multiple (8.8 fixed point via shrd $8), clamped to
+/ the maximum heap (heap_size_33 * 32) and never shrunk below the current
+/ bit_vector_size * 4.
+	movl bit_vector_size_offset(a4),d0
+	movl d0,a3
+	shl $2,a3
+
+	push a1
+	push d0
+
+	movl n_allocated_words_offset(a4),d0
+	addl n_marked_words_offset(a4),d0
+	shl $2,d0
+
+	mull @heap_size_multiple
+	shrd $8,a1,d0
+	shrl $8,a1
+
+	movl d0,d1
+	testl a1,a1
+
+	pop d0
+	pop a1
+
+/ Overflow in the 32-bit product: use the maximum heap size.
+	je not_largest_heap
+
+	movl heap_size_33_offset(a4),d1
+	shl $5,d1
+
+not_largest_heap:
+	cmpl a3,d1
+	jbe no_larger_heap
+
+	movl heap_size_33_offset(a4),a3
+	shl $5,a3
+	cmpl a3,d1
+	jbe not_larger_then_heap
+	movl a3,d1
+not_larger_then_heap:
+	movl d1,d0
+	shr $2,d0
+	movl d0,bit_vector_size_offset(a4)
+no_larger_heap:
+#endif
+/ Scan the mark bit vector for a run of zero words large enough to hold
+/ n_allocated_words; clear the vector while scanning. If a big enough gap
+/ is found it becomes the new allocation area, otherwise fall through to
+/ the compacting collector.
+	movl d0,a2
+
+	movl heap_vector_offset(a4),a3
+
+	shrl $5,a2
+
+/ If the size is not a multiple of 32 words, clear the trailing partial
+/ bit-vector word so the scan below sees a terminator.
+	testb $31,d0b
+	je no_extra_word
+	movl $0,(a3,a2,4)
+no_extra_word:
+
+	subl n_marked_words_offset(a4),d0
+	shl $2,d0
+	movl d0,n_last_heap_free_bytes_offset(a4)
+
+/ Optional heap-use report (flag bit 1).
+	testl $2,@flags
+	je _no_heap_use_message2
+
+	pushl $marked_gc_string_1
+	call @ew_print_string
+	addl $4,sp
+
+	movl n_marked_words_offset(a4),d0
+	shll $2,d0
+	pushl d0
+	call @ew_print_int
+	addl $4,sp
+
+	pushl $heap_use_after_gc_string_2
+	call @ew_print_string
+	addl $4,sp
+
+_no_heap_use_message2:
+
+#ifdef FINALIZERS
+	call call_finalizers
+#endif
+
+/ a3 = required words, a2 = remaining bit-vector words, a0 = scan cursor.
+	movl n_allocated_words_offset(a4),a3
+	xorl d1,d1
+
+	movl heap_vector_offset(a4),a0
+	movl d1,n_free_words_after_mark_offset(a4)
+
+_scan_bits:
+	cmpl (a0),d1
+	je _zero_bits
+	movl d1,(a0)
+	addl $4,a0
+	subl $1,a2
+	jne _scan_bits
+
+	jmp _end_scan
+
+/ Found the start of a zero run (a1 remembers it); skip further zero
+/ words, clearing each as we go.
+_zero_bits:
+	lea 4(a0),a1
+	addl $4,a0
+	subl $1,a2
+	jne _skip_zero_bits_lp1
+	jmp _end_bits
+
+_skip_zero_bits_lp:
+	test d0,d0
+	jne _end_zero_bits
+_skip_zero_bits_lp1:
+	movl (a0),d0
+	addl $4,a0
+	subl $1,a2
+	jne _skip_zero_bits_lp
+
+	test d0,d0
+	je _end_bits
+	movl a0,d0
+	movl d1,-4(a0)
+	subl a1,d0
+	jmp _end_bits2
+
+_end_zero_bits:
+/ Run length in words = (bytes of zero bit-words) * 8; account it and
+/ test against the requirement.
+	movl a0,d0
+	subl a1,d0
+	shl $3,d0
+	addl d0,n_free_words_after_mark_offset(a4)
+	movl d1,-4(a0)
+
+	cmpl a3,d0
+	jb _scan_bits
+
+_found_free_memory:
+/ Convert the bit-vector position back to a heap address: each vector
+/ word covers 32 heap words (128 bytes).
+	movl a2,bit_counter_offset(a4)
+	movl a0,bit_vector_p_offset(a4)
+
+	lea -4(a1),d1
+	subl heap_vector_offset(a4),d1
+	shl $5,d1
+
+	movl heap_p3_offset(a4),a2
+	addl d1,a2
+	movl a2,free_heap_offset(a4)
+
+	movl stack_top_offset(a4),a3
+
+	lea (a2,d0,4),d1
+	movl d1,heap_end_after_gc_offset(a4)
+	subl $32,d1
+	movl d1,end_heap_offset(a4)
+
+	pop d1
+	pop d0
+	ret
+
+_end_bits:
+	movl a0,d0
+	subl a1,d0
+	addl $4,d0
+_end_bits2:
+	shl $3,d0
+	addl d0,n_free_words_after_mark_offset(a4)
+	cmpl a3,d0
+	jae _found_free_memory
+
+/ No sufficiently large gap: compact the heap instead.
+_end_scan:
+	movl a2,bit_counter_offset(a4)
+	jmp compact_gc
+
+/ a2: pointer to stack element
+/ a4: heap_vector
+/ d0,d1,a0,a1,a3: free
+
+/ _mark_stack_nodes: mark every node referenced from the pointer vector
+/ [a2 .. end_vector). Non-heap pointers and already-marked nodes are
+/ skipped; new nodes are traced either on the C stack (#else branch) or
+/ by pointer reversal (MARK_USING_REVERSAL).
+_mark_stack_nodes:
+	cmpl end_vector_offset(a4),a2
+	je _end_mark_nodes
+
+_mark_stack_nodes_:
+	movl (a2),a0
+	movl neg_heap_p3_offset(a4),d1
+	movl heap_vector_d4_offset(a4),d0
+
+	addl $4,a2
+
+/ d1 = node - heap_p3; offsets >= heap size are outside the heap (jnc).
+	addl a0,d1
+	cmpl heap_size_32_33_offset(a4),d1
+	jnc _mark_stack_nodes
+
+/ Mark-bit address: word index = offset>>7 (+ heap_vector/4, scaled by 4
+/ in the operand); bit mask = bit_set_table[offset & 31*4].
+	movl $31*4,a1
+	andl d1,a1
+	shrl $7,d1
+	addl d0,d1
+
+	movl bit_set_table(a1),a3
+	testl (,d1,4),a3
+	jne _mark_stack_nodes
+
+	pushl a2
+
+#ifdef MARK_USING_REVERSAL
+	movl $1,a3
+	jmp __mark_node
+
+__end_mark_using_reversal:
+	popl a2
+	movl a0,-4(a2)
+	jmp _mark_stack_nodes
+#else
+/ Sentinel terminating this node's work list on the C stack.
+	pushl $0
+
+	jmp _mark_arguments
+
+/ Stack-based marking core. Invariants on entry to _mark_arguments:
+/ a0 = node, d1 = mark-bit word index, a3 = bit mask, d0 = descriptor.
+/ n_marked_words counts marked heap words; the fits_in_word_* checks set
+/ bit 0 of the next vector word when a node straddles a 32-word group.
+_mark_hnf_2:
+	cmpl $0x20000000,a3
+	jbe fits_in_word_6
+	orl $1,4(,d1,4)
+fits_in_word_6:
+	addl $3,n_marked_words_offset(a4)
+
+_mark_record_2_c:
+/ Push the second argument for later, trace the first now.
+	movl 4(a0),d1
+	push d1
+
+/ C stack nearly exhausted: switch to pointer-reversal marking.
+	cmpl end_stack_offset(a4),sp
+	jb __mark_using_reversal
+
+_mark_node2:
+_shared_argument_part:
+	movl (a0),a0
+
+_mark_node:
+	movl neg_heap_p3_offset(a4),d1
+	movl heap_vector_d4_offset(a4),d0
+
+	addl a0,d1
+	cmpl heap_size_32_33_offset(a4),d1
+	jnc _mark_next_node
+
+	movl $31*4,a1
+	andl d1,a1
+	shrl $7,d1
+	addl d0,d1
+	movl bit_set_table(a1),a3
+	testl (,d1,4),a3
+	jne _mark_next_node
+
+_mark_arguments:
+/ Descriptor bit 1 set = head normal form; clear = unevaluated closure.
+	movl (a0),d0
+	test $2,d0
+	je _mark_lazy_node
+
+	movzwl -2(d0),a2
+	test a2,a2
+	je _mark_hnf_0
+
+	orl a3,(,d1,4)
+	addl $4,a0
+
+/ Arity >= 256 encodes records; otherwise dispatch on arity 1/2/3+.
+	cmpl $256,a2
+	jae _mark_record
+
+	subl $2,a2
+	je _mark_hnf_2
+	jb _mark_hnf_1
+
+_mark_hnf_3:
+/ Arity >= 3: arguments 2..n live in a separate argument block (4(a0)).
+	movl 4(a0),a1
+
+	cmpl $0x20000000,a3
+	jbe fits_in_word_1
+	orl $1,4(,d1,4)
+fits_in_word_1:
+
+	movl neg_heap_p3_offset(a4),d1
+	movl heap_vector_d4_offset(a4),a3
+	addl a1,d1
+
+	addl $3,n_marked_words_offset(a4)
+
+	movl $31*4,d0
+	andl d1,d0
+	shrl $7,d1
+	addl a3,d1
+
+	movl bit_set_table(d0),a3
+	testl (,d1,4),a3
+	jne _shared_argument_part
+
+_no_shared_argument_part:
+/ Mark the argument block and push its members (last first).
+	orl a3,(,d1,4)
+	addl $1,a2
+
+	addl a2,n_marked_words_offset(a4)
+	lea (d0,a2,4),d0
+	lea -4(a1,a2,4),a1
+
+	cmpl $32*4,d0
+	jbe fits_in_word_2
+	orl $1,4(,d1,4)
+fits_in_word_2:
+
+	movl (a1),d1
+	subl $2,a2
+	pushl d1
+
+_push_hnf_args:
+	movl -4(a1),d1
+	subl $4,a1
+	pushl d1
+	subl $1,a2
+	jge _push_hnf_args
+
+	cmpl end_stack_offset(a4),sp
+	jae _mark_node2
+
+	jmp __mark_using_reversal
+
+_mark_hnf_1:
+	cmpl $0x40000000,a3
+	jbe fits_in_word_4
+	orl $1,4(,d1,4)
+fits_in_word_4:
+	addl $2,n_marked_words_offset(a4)
+	movl (a0),a0
+	jmp _mark_node
+
+_mark_lazy_node_1:
+	addl $4,a0
+	orl a3,(,d1,4)
+	cmpl $0x20000000,a3
+	jbe fits_in_word_3
+	orl $1,4(,d1,4)
+fits_in_word_3:
+	addl $3,n_marked_words_offset(a4)
+
+	cmpl $1,a2
+	je _mark_node2
+
+/ Negative "arity" marks selector closures: -2 = indirection, -3 and
+/ below = (strict) record selectors. If the selected tuple/record is
+/ already evaluated and unmarked, the selector is short-circuited to an
+/ indirection at the selected field.
+_mark_selector_node_1:
+	cmpl $-2,a2
+	movl (a0),a1
+	je _mark_indirection_node
+
+	movl neg_heap_p3_offset(a4),d1
+	addl a1,d1
+
+	movl $31*4,a3
+	andl d1,a3
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	cmpl $-3,a2
+
+	movl bit_set_table(a3),a3
+	jle _mark_record_selector_node_1
+
+	testl (,d1,4),a3
+	jne _mark_node3
+
+	movl (a1),a2
+	testl $2,a2
+	je _mark_node3
+
+	cmpw $2,-2(a2)
+	jbe _small_tuple_or_record
+
+_large_tuple_or_record:
+/ Tuple with an argument block: the block (8(a1)) must be unmarked too.
+	movl 8(a1),d1
+	addl neg_heap_p3_offset(a4),d1
+
+	movl $31*4,a2
+	andl d1,a2
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(a2),a2
+	testl (,d1,4),a2
+	jne _mark_node3
+
+/ Replace the selector node by an indirection to the selected element;
+/ -8(d0) holds the selector descriptor, 4(d0) the field offset.
+	movl -8(d0),d0
+	movl $__indirection,-4(a0)
+	movl a0,a2
+
+	movzwl 4(d0),d0
+	cmpl $8,d0
+	jl _mark_tuple_selector_node_1
+	movl 8(a1),a1
+	je _mark_tuple_selector_node_2
+	movl -12(a1,d0),a0
+	movl a0,(a2)
+	jmp _mark_node
+
+_mark_tuple_selector_node_2:
+	movl (a1),a0
+	movl a0,(a2)
+	jmp _mark_node
+
+_small_tuple_or_record:
+	movl -8(d0),d0
+	movl $__indirection,-4(a0)
+	movl a0,a2
+
+	movzwl 4(d0),d0
+_mark_tuple_selector_node_1:
+	movl (a1,d0),a0
+	movl a0,(a2)
+	jmp _mark_node
+
+_mark_record_selector_node_1:
+	je _mark_strict_record_selector_node_1
+
+	testl (,d1,4),a3
+	jne _mark_node3
+
+	movl (a1),a2
+	testl $2,a2
+	je _mark_node3
+
+	cmpw $258,-2(a2)
+	jbe _small_tuple_or_record
+
+	movl 8(a1),d1
+	addl neg_heap_p3_offset(a4),d1
+
+	movl $31*4,a2
+	andl d1,a2
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(a2),a2
+	testl (,d1,4),a2
+	jne _mark_node3
+
+	movl -8(d0),d0
+	movl $__indirection,-4(a0)
+	movl a0,a2
+
+	movzwl 4(d0),d0
+	cmpl $8,d0
+	jle _mark_record_selector_node_2
+	movl 8(a1),a1
+	subl $12,d0
+_mark_record_selector_node_2:
+	movl (a1,d0),a0
+
+	movl a0,(a2)
+	jmp _mark_node
+
+_mark_strict_record_selector_node_1:
+	testl (,d1,4),a3
+	jne _mark_node3
+
+	movl (a1),a2
+	testl $2,a2
+	je _mark_node3
+
+	cmpw $258,-2(a2)
+	jbe _select_from_small_record
+
+	movl 8(a1),d1
+	addl neg_heap_p3_offset(a4),d1
+
+	movl $31*4,a2
+	andl d1,a2
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(a2),a2
+	testl (,d1,4),a2
+	jne _mark_node3
+
+_select_from_small_record:
+/ Strict selector: copy up to two selected fields into the node in place
+/ (offsets in halfwords 4(d0)/6(d0)) and install the result descriptor.
+	movl -8(d0),d0
+
+	subl $4,a0
+
+	movzwl 4(d0),d1
+	cmpl $8,d1
+	jle _mark_strict_record_selector_node_2
+	addl 8(a1),d1
+	movl -12(d1),d1
+	jmp _mark_strict_record_selector_node_3
+_mark_strict_record_selector_node_2:
+	movl (a1,d1),d1
+_mark_strict_record_selector_node_3:
+	movl d1,4(a0)
+
+	movzwl 6(d0),d1
+	testl d1,d1
+	je _mark_strict_record_selector_node_5
+	cmpl $8,d1
+	jle _mark_strict_record_selector_node_4
+	movl 8(a1),a1
+	subl $12,d1
+_mark_strict_record_selector_node_4:
+	movl (a1,d1),d1
+	movl d1,8(a0)
+_mark_strict_record_selector_node_5:
+
+	movl -4(d0),d0
+	movl d0,(a0)
+	jmp _mark_next_node
+
+_mark_indirection_node:
+_mark_node3:
+	movl a1,a0
+	jmp _mark_node
+
+/ Pop the next pending pointer; the $0 sentinel ends this root's work,
+/ after which the next root in the vector is taken.
+_mark_next_node:
+	popl a0
+	test a0,a0
+	jne _mark_node
+
+	popl a2
+	cmpl end_vector_offset(a4),a2
+	jne _mark_stack_nodes_
+
+_end_mark_nodes:
+	ret
+
+/ Lazy (unevaluated) nodes: -4(d0) of the code pointer holds the arity;
+/ 0 = real/file-sized node, 1/negative = special (selector/indirection),
+/ >= 257 = closure with unboxed arguments (packed two counts).
+_mark_lazy_node:
+	movl -4(d0),a2
+	test a2,a2
+	je _mark_real_or_file
+
+	cmpl $1,a2
+	jle _mark_lazy_node_1
+
+	cmpl $257,a2
+	jge _mark_closure_with_unboxed_arguments
+	incl a2
+	orl a3,(,d1,4)
+
+	addl a2,n_marked_words_offset(a4)
+	lea (a1,a2,4),a1
+	lea (a0,a2,4),a0
+
+	cmpl $32*4,a1
+	jbe fits_in_word_7
+	orl $1,4(,d1,4)
+fits_in_word_7:
+	subl $3,a2
+/ Push all but the first closure argument, last first.
+_push_lazy_args:
+	movl -4(a0),d1
+	subl $4,a0
+	push d1
+	subl $1,a2
+	jge _push_lazy_args
+
+	subl $4,a0
+
+	cmpl end_stack_offset(a4),sp
+	jae _mark_node2
+
+	jmp __mark_using_reversal
+
+_mark_closure_with_unboxed_arguments:
+	je _mark_real_or_file
+
+/ a2 = total arguments (low byte) ; d0 = number of unboxed ones (high
+/ bits); only the boxed prefix is traced.
+	movl a2,d0
+	andl $255,a2
+
+	shrl $8,d0
+	addl $1,a2
+
+	orl a3,(,d1,4)
+	addl a2,n_marked_words_offset(a4)
+	lea (a1,a2,4),a1
+
+	subl d0,a2
+
+	cmpl $32*4,a1
+	jbe fits_in_word_7_
+	orl $1,4(,d1,4)
+fits_in_word_7_:
+	subl $2,a2
+	jl _mark_next_node
+
+	lea 8(a0,a2,4),a0
+	jne _push_lazy_args
+
+_mark_closure_with_one_boxed_argument:
+	movl -4(a0),a0
+	jmp _mark_node
+
+/ Arity 0 head normal forms: dispatch on the descriptor address -
+/ INT/CHAR/BOOL-like small nodes, then strings/arrays, reals/files.
+_mark_hnf_0:
+	cmpl $INT+2,d0
+	jb _mark_real_file_or_string
+
+	orl a3,(,d1,4)
+
+	cmpl $CHAR+2,d0
+	ja _mark_normal_hnf_0
+
+_mark_bool:
+	addl $2,n_marked_words_offset(a4)
+
+	cmpl $0x40000000,a3
+	jbe _mark_next_node
+
+	orl $1,4(,d1,4)
+	jmp _mark_next_node
+
+_mark_normal_hnf_0:
+	incl n_marked_words_offset(a4)
+	jmp _mark_next_node
+
+_mark_real_file_or_string:
+	cmpl $__STRING__+2,d0
+	jbe _mark_string_or_array
+
+_mark_real_or_file:
+	orl a3,(,d1,4)
+	addl $3,n_marked_words_offset(a4)
+
+	cmpl $0x20000000,a3
+	jbe _mark_next_node
+
+	orl $1,4(,d1,4)
+	jmp _mark_next_node
+
+/ Records: arity field is 256 + field count; the second halfword of the
+/ descriptor gives the number of pointer fields.
+_mark_record:
+	subl $258,a2
+	je _mark_record_2
+	jl _mark_record_1
+
+_mark_record_3:
+	addl $3,n_marked_words_offset(a4)
+
+	cmpl $0x20000000,a3
+	jbe fits_in_word_13
+	orl $1,4(,d1,4)
+fits_in_word_13:
+	movl 4(a0),a1
+	movzwl -2+2(d0),d1
+
+	movl neg_heap_p3_offset(a4),d0
+	addl a1,d0
+
+	movl $31*4,a3
+	andl d0,a3
+	shrl $7,d0
+	addl heap_vector_d4_offset(a4),d0
+
+	subl $1,d1
+
+	movl bit_set_table(a3),a1
+	jb _mark_record_3_bb
+
+	testl (,d0,4),a1
+	jne _mark_node2
+
+	addl $1,a2
+	orl a1,(,d0,4)
+	addl a2,n_marked_words_offset(a4)
+	lea (a3,a2,4),a3
+
+	cmpl $32*4,a3
+	jbe _push_record_arguments
+	orl $1,4(,d0,4)
+_push_record_arguments:
+	movl 4(a0),a1
+	movl d1,a2
+	shl $2,d1
+	addl d1,a1
+	subl $1,a2
+	jge _push_hnf_args
+
+	jmp _mark_node2
+
+_mark_record_3_bb:
+/ All-basic argument block: only set the bits, nothing to trace.
+	testl (,d0,4),a1
+	jne _mark_next_node
+
+	addl $1,a2
+	orl a1,(,d0,4)
+	addl a2,n_marked_words_offset(a4)
+	lea (a3,a2,4),a3
+
+	cmpl $32*4,a3
+	jbe _mark_next_node
+
+	orl $1,4(,d0,4)
+	jmp _mark_next_node
+
+_mark_record_2:
+	cmpl $0x20000000,a3
+	jbe fits_in_word_12
+	orl $1,4(,d1,4)
+fits_in_word_12:
+	addl $3,n_marked_words_offset(a4)
+
+/ 0, 1 or 2 pointer fields decide what to trace.
+	cmpw $1,-2+2(d0)
+	ja _mark_record_2_c
+	je _mark_node2
+	jmp _mark_next_node
+
+_mark_record_1:
+	cmpw $0,-2+2(d0)
+	jne _mark_hnf_1
+
+	jmp _mark_bool
+
+_mark_string_or_array:
+	je _mark_string_
+
+/ Arrays: 8(a0) = element descriptor (0 for a lazy array), 4(a0) = size.
+_mark_array:
+	movl 8(a0),a2
+	test a2,a2
+	je _mark_lazy_array
+
+	movzwl -2(a2),d0
+
+	testl d0,d0
+	je _mark_strict_basic_array
+
+	movzwl -2+2(a2),a2
+	testl a2,a2
+	je _mark_b_record_array
+
+	cmpl end_stack_offset(a4),sp
+	jb _mark_array_using_reversal
+
+	subl $256,d0
+	cmpl a2,d0
+	je _mark_a_record_array
+
+/ Record array with both pointer and basic fields: set bits for the whole
+/ array, then mark each element's pointer part via _mark_stack_nodes.
+_mark_ab_record_array:
+	orl a3,(,d1,4)
+	movl 4(a0),a2
+
+	imull a2,d0
+	addl $3,d0
+
+	addl d0,n_marked_words_offset(a4)
+	lea -4(a0,d0,4),d0
+
+	addl neg_heap_p3_offset(a4),d0
+	shrl $7,d0
+	addl heap_vector_d4_offset(a4),d0
+
+	cmpl d0,d1
+	jae _end_set_ab_array_bits
+
+	incl d1
+	movl $1,a2
+	cmpl d0,d1
+	jae _last_ab_array_bits
+
+_mark_ab_array_lp:
+	orl a2,(,d1,4)
+	incl d1
+	cmpl d0,d1
+	jb _mark_ab_array_lp
+
+_last_ab_array_bits:
+	orl a2,(,d1,4)
+
+_end_set_ab_array_bits:
+	movl 4(a0),d0
+	movl 8(a0),a1
+	movzwl -2+2(a1),d1
+	movzwl -2(a1),a1
+	shll $2,d1
+	lea -1024(,a1,4),a1
+	pushl d1
+	pushl a1
+	lea 12(a0),a2
+	pushl end_vector_offset(a4)
+	jmp _mark_ab_array_begin
+
+_mark_ab_array:
+	movl 8(sp),d1
+	pushl d0
+	pushl a2
+	lea (a2,d1),d0
+
+	movl d0,end_vector_offset(a4)
+	call _mark_stack_nodes
+
+	movl 4+8(sp),d1
+	popl a2
+	popl d0
+	addl d1,a2
+_mark_ab_array_begin:
+	subl $1,d0
+	jnc _mark_ab_array
+
+	popl end_vector_offset(a4)
+	addl $8,sp
+	jmp _mark_next_node
+
+/ All-pointer record array: one _mark_stack_nodes call over all elements.
+_mark_a_record_array:
+	orl a3,(,d1,4)
+	movl 4(a0),a2
+
+	imull a2,d0
+	movl d0,a1
+
+	addl $3,d0
+
+	addl d0,n_marked_words_offset(a4)
+	lea -4(a0,d0,4),d0
+
+	addl neg_heap_p3_offset(a4),d0
+	shrl $7,d0
+	addl heap_vector_d4_offset(a4),d0
+
+	cmpl d0,d1
+	jae _end_set_a_array_bits
+
+	incl d1
+	movl $1,a2
+	cmpl d0,d1
+	jae _last_a_array_bits
+
+_mark_a_array_lp:
+	orl a2,(,d1,4)
+	incl d1
+	cmpl d0,d1
+	jb _mark_a_array_lp
+
+_last_a_array_bits:
+	orl a2,(,d1,4)
+
+_end_set_a_array_bits:
+	lea 12(a0),a2
+	lea 12(a0,a1,4),d0
+
+	pushl end_vector_offset(a4)
+	movl d0,end_vector_offset(a4)
+	call _mark_stack_nodes
+	popl end_vector_offset(a4)
+
+	jmp _mark_next_node
+
+_mark_lazy_array:
+	cmpl end_stack_offset(a4),sp
+	jb _mark_array_using_reversal
+
+	orl a3,(,d1,4)
+	movl 4(a0),d0
+
+	movl d0,a1
+	addl $3,d0
+
+	addl d0,n_marked_words_offset(a4)
+	lea -4(a0,d0,4),d0
+
+	addl neg_heap_p3_offset(a4),d0
+	shrl $7,d0
+	addl heap_vector_d4_offset(a4),d0
+
+	cmpl d0,d1
+	jae _end_set_lazy_array_bits
+
+	incl d1
+	movl $1,a2
+	cmpl d0,d1
+	jae _last_lazy_array_bits
+
+_mark_lazy_array_lp:
+	orl a2,(,d1,4)
+	incl d1
+	cmpl d0,d1
+	jb _mark_lazy_array_lp
+
+_last_lazy_array_bits:
+	orl a2,(,d1,4)
+
+_end_set_lazy_array_bits:
+	lea 12(a0),a2
+	lea 12(a0,a1,4),d0
+
+	pushl end_vector_offset(a4)
+	movl d0,end_vector_offset(a4)
+	call _mark_stack_nodes
+	popl end_vector_offset(a4)
+
+	jmp _mark_next_node
+
+_mark_array_using_reversal:
+	pushl $0
+	movl $1,a3
+	jmp __mark_node
+
+/ Basic (unboxed) arrays: compute the node size in words and just set
+/ the mark bits; bools are byte-sized, reals take two words.
+_mark_strict_basic_array:
+	movl 4(a0),d0
+	cmpl $INT+2,a2
+	je _mark_strict_int_array
+	cmpl $BOOL+2,a2
+	je _mark_strict_bool_array
+_mark_strict_real_array:
+	addl d0,d0
+_mark_strict_int_array:
+	addl $3,d0
+	jmp _mark_basic_array_
+_mark_strict_bool_array:
+	addl $12+3,d0
+	shrl $2,d0
+	jmp _mark_basic_array_
+
+_mark_b_record_array:
+	movl 4(a0),a2
+	subl $256,d0
+	imull a2,d0
+	addl $3,d0
+	jmp _mark_basic_array_
+
+_mark_string_:
+	movl 4(a0),d0
+	addl $8+3,d0
+	shrl $2,d0
+
+_mark_basic_array_:
+	orl a3,(,d1,4)
+
+	addl d0,n_marked_words_offset(a4)
+	lea -4(a0,d0,4),d0
+
+	addl neg_heap_p3_offset(a4),d0
+	shrl $7,d0
+	addl heap_vector_d4_offset(a4),d0
+
+	cmpl d0,d1
+	jae _mark_next_node
+
+	incl d1
+	movl $1,a2
+	cmpl d0,d1
+	jae _last_string_bits
+
+_mark_string_lp:
+	orl a2,(,d1,4)
+	incl d1
+	cmpl d0,d1
+	jb _mark_string_lp
+
+_last_string_bits:
+	orl a2,(,d1,4)
+	jmp _mark_next_node
+
+/ Return point of the pointer-reversal marker when it was entered from
+/ the stack-based code: store the final node pointer back if requested.
+__end_mark_using_reversal:
+	popl a1
+	test a1,a1
+	je _mark_next_node
+	movl a0,(a1)
+	jmp _mark_next_node
+#endif
+
+/ Pointer-reversal (Deutsch-Schorr-Waite) marking, used when the C stack
+/ is nearly full. a3 is the reversed parent chain; its low two bits tag
+/ the kind of parent link ($1 terminates the chain), so heap cells
+/ temporarily hold back pointers until __mark_parent unwinds them.
+__mark_using_reversal:
+	pushl a0
+	movl $1,a3
+	movl (a0),a0
+	jmp __mark_node
+
+__mark_arguments:
+	movl (a0),d0
+	test $2,d0
+	je __mark_lazy_node
+
+	movzwl -2(d0),a2
+	testl a2,a2
+	je __mark_hnf_0
+
+	addl $4,a0
+
+	cmpl $256,a2
+	jae __mark__record
+
+	subl $2,a2
+	je __mark_hnf_2
+	jb __mark_hnf_1
+
+__mark_hnf_3:
+	movl bit_set_table(a1),a1
+	addl $3,n_marked_words_offset(a4)
+
+	orl a1,(,d1,4)
+
+	cmpl $0x20000000,a1
+
+	movl heap_vector_d4_offset(a4),a1
+
+	jbe fits__in__word__1
+	orl $1,4(,d1,4)
+fits__in__word__1:
+	movl 4(a0),d1
+	addl neg_heap_p3_offset(a4),d1
+
+	movl $31*4,d0
+	andl d1,d0
+	shrl $7,d1
+	addl a1,d1
+
+	movl bit_set_table(d0),a1
+	testl (,d1,4),a1
+	jne __shared_argument_part
+
+__no_shared_argument_part:
+/ Reverse into the argument block: tag its first word (orl $1) and link
+/ the last argument slot into the parent chain.
+	orl a1,(,d1,4)
+	movl 4(a0),a1
+
+	addl $1,a2
+	movl a3,4(a0)
+
+	addl a2,n_marked_words_offset(a4)
+	addl $4,a0
+
+	shl $2,a2
+	orl $1,(a1)
+
+	addl a2,d0
+	addl a2,a1
+
+	cmpl $32*4,d0
+	jbe fits__in__word__2
+	orl $1,4(,d1,4)
+fits__in__word__2:
+
+	movl -4(a1),a2
+	movl a0,-4(a1)
+	lea -4(a1),a3
+	movl a2,a0
+	jmp __mark_node
+
+__mark_hnf_1:
+	movl bit_set_table(a1),a1
+	addl $2,n_marked_words_offset(a4)
+	orl a1,(,d1,4)
+	cmpl $0x40000000,a1
+	jbe __shared_argument_part
+	orl $1,4(,d1,4)
+/ Reverse the single argument pointer; tag 2 marks an argument link.
+__shared_argument_part:
+	movl (a0),a2
+	movl a3,(a0)
+	lea 2(a0),a3
+	movl a2,a0
+	jmp __mark_node
+
+__mark_no_selector_2:
+	popl d1
+__mark_no_selector_1:
+	movl bit_set_table(a1),a1
+	addl $3,n_marked_words_offset(a4)
+	orl a1,(,d1,4)
+	cmpl $0x20000000,a1
+	jbe __shared_argument_part
+
+	orl $1,4(,d1,4)
+	jmp __shared_argument_part
+
+__mark_lazy_node_1:
+	je __mark_no_selector_1
+
+/ Selector closures - same short-circuiting as the stack-based variant,
+/ but with d0/d1 spilled to the stack around the bit tests.
+__mark_selector_node_1:
+	cmpl $-2,a2
+	je __mark_indirection_node
+
+	cmpl $-3,a2
+
+	pushl d1
+	movl (a0),a2
+	pushl d0
+	movl neg_heap_p3_offset(a4),d0
+
+	jle __mark_record_selector_node_1
+
+	addl a2,d0
+
+	movl d0,d1
+	andl $31*4,d0
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(d0),d0
+	testl (,d1,4),d0
+	popl d0
+	jne __mark_no_selector_2
+
+	movl (a2),d1
+	testb $2,d1b
+	je __mark_no_selector_2
+
+	cmpw $2,-2(d1)
+	jbe __small_tuple_or_record
+
+__large_tuple_or_record:
+	movl 8(a2),d1
+	addl neg_heap_p3_offset(a4),d1
+
+	movl $31*4,a2
+	andl d1,a2
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(a2),a2
+	testl (,d1,4),a2
+	jne __mark_no_selector_2
+
+	movl -8(d0),d0
+	movl (a0),a1
+	movl $__indirection,-4(a0)
+	movl a0,a2
+
+	popl d1
+
+	movzwl 4(d0),d0
+	cmpl $8,d0
+	jl __mark_tuple_selector_node_1
+	movl 8(a1),a1
+	je __mark_tuple_selector_node_2
+	subl $12,d0
+	movl (a1,d0),a0
+	movl a0,(a2)
+	jmp __mark_node
+
+__mark_tuple_selector_node_2:
+	movl (a1),a0
+	movl a0,(a2)
+	jmp __mark_node
+
+__small_tuple_or_record:
+	movl -8(d0),d0
+	movl (a0),a1
+	movl $__indirection,-4(a0)
+	movl a0,a2
+
+	popl d1
+
+	movzwl 4(d0),d0
+__mark_tuple_selector_node_1:
+	movl (a1,d0),a0
+	movl a0,(a2)
+	jmp __mark_node
+
+__mark_record_selector_node_1:
+	je __mark_strict_record_selector_node_1
+
+	addl a2,d0
+
+	movl d0,d1
+	andl $31*4,d0
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(d0),d0
+	testl (,d1,4),d0
+	popl d0
+	jne __mark_no_selector_2
+
+	movl (a2),d1
+	testb $2,d1b
+	je __mark_no_selector_2
+
+	cmpw $258,-2(d1)
+	jbe __small_record
+
+	movl 8(a2),d1
+	addl neg_heap_p3_offset(a4),d1
+
+	movl $31*4,a2
+	andl d1,a2
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(a2),a2
+	testl (,d1,4),a2
+	jne __mark_no_selector_2
+
+__small_record:
+	movl -8(d0),d0
+	movl (a0),a1
+	movl $__indirection,-4(a0)
+	movl a0,a2
+
+	popl d1
+
+	movzwl 4(d0),d0
+	cmpl $8,d0
+	jle __mark_record_selector_node_2
+	movl 8(a1),a1
+	subl $12,d0
+__mark_record_selector_node_2:
+	movl (a1,d0),a0
+
+	movl a0,(a2)
+	jmp __mark_node
+
+__mark_strict_record_selector_node_1:
+	addl a2,d0
+
+	movl d0,d1
+	andl $31*4,d0
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(d0),d0
+	testl (,d1,4),d0
+	popl d0
+	jne __mark_no_selector_2
+
+	movl (a2),d1
+	testb $2,d1b
+	je __mark_no_selector_2
+
+	cmpw $258,-2(d1)
+	jle __select_from_small_record
+
+	movl 8(a2),d1
+	addl neg_heap_p3_offset(a4),d1
+
+	movl $31*4,a2
+	andl d1,a2
+	shrl $7,d1
+	addl heap_vector_d4_offset(a4),d1
+
+	movl bit_set_table(a2),a2
+	testl (,d1,4),a2
+	jne __mark_no_selector_2
+
+__select_from_small_record:
+	movl -8(d0),d0
+	movl (a0),a1
+	popl d1
+	subl $4,a0
+
+	movzwl 4(d0),d1
+	cmpl $8,d1
+	jle __mark_strict_record_selector_node_2
+	addl 8(a1),d1
+	movl -12(d1),d1
+	jmp __mark_strict_record_selector_node_3
+__mark_strict_record_selector_node_2:
+	movl (a1,d1),d1
+__mark_strict_record_selector_node_3:
+	movl d1,4(a0)
+
+	movzwl 6(d0),d1
+	testl d1,d1
+	je __mark_strict_record_selector_node_5
+	cmpl $8,d1
+	jle __mark_strict_record_selector_node_4
+	movl 8(a1),a1
+	subl $12,d1
+__mark_strict_record_selector_node_4:
+	movl (a1,d1),d1
+	movl d1,8(a0)
+__mark_strict_record_selector_node_5:
+
+	movl -4(d0),d0
+	movl d0,(a0)
+	jmp __mark_next_node
+
+__mark_indirection_node:
+	movl (a0),a0
+	jmp __mark_node
+
+__mark_hnf_2:
+	movl bit_set_table(a1),a1
+	addl $3,n_marked_words_offset(a4)
+	orl a1,(,d1,4)
+	cmpl $0x20000000,a1
+	jbe fits__in__word__6
+	orl $1,4(,d1,4)
+fits__in__word__6:
+
+/ Two arguments: tag the descriptor word (orl $2) so __mark_parent can
+/ recognize the node, reverse through the second argument first.
+__mark_record_2_c:
+	movl (a0),d0
+	movl 4(a0),a2
+	orl $2,d0
+	movl a3,4(a0)
+	movl d0,(a0)
+	lea 4(a0),a3
+	movl a2,a0
+
+__mark_node:
+	movl neg_heap_p3_offset(a4),d1
+	movl heap_vector_d4_offset(a4),d0
+
+	addl a0,d1
+	cmpl heap_size_32_33_offset(a4),d1
+	jae __mark_next_node
+
+	movl $31*4,a1
+	andl d1,a1
+	shrl $7,d1
+	addl d0,d1
+	movl bit_set_table(a1),a2
+	testl (,d1,4),a2
+	je __mark_arguments
+
+/ Step back up the reversed chain; an untagged a3 points into an
+/ argument block whose preceding slot still needs visiting.
+__mark_next_node:
+	testl $3,a3
+	jne __mark_parent
+
+	movl -4(a3),a2
+	movl (a3),a1
+	movl a0,(a3)
+	movl a1,-4(a3)
+	subl $4,a3
+
+	movl a2,a0
+	andl $3,a2
+	andl $-4,a0
+	orl a2,a3
+	jmp __mark_node
+
+__mark_parent:
+	movl a3,d1
+	andl $-4,a3
+	je __end_mark_using_reversal
+
+	andl $3,d1
+	movl (a3),a2
+	movl a0,(a3)
+
+	subl $1,d1
+	je __argument_part_parent
+
+	lea -4(a3),a0
+	movl a2,a3
+	jmp __mark_next_node
+
+__argument_part_parent:
+	andl $-4,a2
+	movl a3,a1
+	movl -4(a2),a0
+	movl (a2),d1
+	movl d1,-4(a2)
+	movl a1,(a2)
+	lea 2-4(a2),a3
+	jmp __mark_node
+
+__mark_lazy_node:
+	movl -4(d0),a2
+	testl a2,a2
+	je __mark_real_or_file
+
+	addl $4,a0
+	cmpl $1,a2
+	jle __mark_lazy_node_1
+	cmpl $257,a2
+	jge __mark_closure_with_unboxed_arguments
+
+	addl $1,a2
+	movl a1,d0
+	movl bit_set_table(a1),a1
+	addl a2,n_marked_words_offset(a4)
+
+	lea (d0,a2,4),d0
+	subl $2,a2
+
+	orl a1,(,d1,4)
+
+	cmpl $32*4,d0
+	jbe fits__in__word__7
+	orl $1,4(,d1,4)
+fits__in__word__7:
+__mark_closure_with_unboxed_arguments__2:
+	lea (a0,a2,4),a1
+	movl (a0),d0
+	orl $2,d0
+	movl d0,(a0)
+	movl (a1),a0
+	movl a3,(a1)
+	movl a1,a3
+	jmp __mark_node
+
+__mark_closure_with_unboxed_arguments:
+	je __mark_closure_1_with_unboxed_argument
+
+	movl bit_set_table(a1),d0
+	orl d0,(,d1,4)
+
+	movl a2,d0
+	andl $255,a2
+
+	addl $1,a2
+	shrl $8,d0
+	addl a2,n_marked_words_offset(a4)
+
+	lea (a1,a2,4),a1
+	subl d0,a2
+
+	cmpl $32*4,a1
+	jbe fits__in_word_7_
+	orl $1,4(,d1,4)
+fits__in_word_7_:
+	subl $2,a2
+	jg __mark_closure_with_unboxed_arguments__2
+	je __shared_argument_part
+	subl $4,a0
+	jmp __mark_next_node
+
+__mark_closure_1_with_unboxed_argument:
+	subl $4,a0
+	jmp __mark_real_or_file
+
+/ Arity-0 nodes in the reversal marker additionally share INT/CHAR nodes
+/ by redirecting a0 to the static small_integers/static_characters tables.
+__mark_hnf_0:
+	cmpl $INT+2,d0
+	jne __no_int_3
+
+	movl 4(a0),a2
+	cmpl $33,a2
+	jb ____small_int
+
+__mark_bool_or_small_string:
+	movl bit_set_table(a1),a1
+	addl $2,n_marked_words_offset(a4)
+	orl a1,(,d1,4)
+	cmpl $0x40000000,a1
+	jbe __mark_next_node
+	orl $1,4(,d1,4)
+	jmp __mark_next_node
+
+____small_int:
+	lea small_integers(,a2,8),a0
+	jmp __mark_next_node
+
+__no_int_3:
+	jb __mark_real_file_or_string
+
+	cmpl $CHAR+2,d0
+	jne __no_char_3
+	movzbl 4(a0),a2
+	lea static_characters(,a2,8),a0
+	jmp __mark_next_node
+
+__no_char_3:
+	jb __mark_bool_or_small_string
+
+	lea ZERO_ARITY_DESCRIPTOR_OFFSET-2(d0),a0
+	jmp __mark_next_node
+
+__mark_real_file_or_string:
+	cmpl $__STRING__+2,d0
+	jbe __mark_string_or_array
+
+__mark_real_or_file:
+	movl bit_set_table(a1),a1
+	addl $3,n_marked_words_offset(a4)
+
+	orl a1,(,d1,4)
+
+	cmpl $0x20000000,a1
+	jbe __mark_next_node
+
+	orl $1,4(,d1,4)
+	jmp __mark_next_node
+
+__mark__record:
+	subl $258,a2
+	je __mark_record_2
+	jl __mark_record_1
+
+__mark_record_3:
+	movl bit_set_table(a1),a1
+	addl $3,n_marked_words_offset(a4)
+	orl a1,(,d1,4)
+	cmpl $0x20000000,a1
+	jbe fits__in__word__13
+	orl $1,4(,d1,4)
+fits__in__word__13:
+	movzwl -2+2(d0),d1
+
+	movl 4(a0),d0
+	addl neg_heap_p3_offset(a4),d0
+
+	movl $31*4,a1
+	andl d0,a1
+	shrl $7,d0
+	addl heap_vector_d4_offset(a4),d0
+
+	pushl a3
+
+	movl bit_set_table(a1),a3
+	testl (,d0,4),a3
+	jne __shared_record_argument_part
+
+	addl $1,a2
+	orl a3,(,d0,4)
+
+	lea (a1,a2,4),a1
+	addl a2,n_marked_words_offset(a4)
+
+	popl a3
+
+	cmpl $32*4,a1
+	jbe fits__in__word__14
+	orl $1,4(,d0,4)
+fits__in__word__14:
+	subl $1,d1
+	movl 4(a0),a1
+	jl __mark_record_3_bb
+	je __shared_argument_part
+
+	movl a3,4(a0)
+	addl $4,a0
+
+	subl $1,d1
+	je __mark_record_3_aab
+
+	lea (a1,d1,4),a3
+	movl (a1),d0
+	orl $1,d0
+	movl (a3),a2
+	movl d0,(a1)
+	movl a0,(a3)
+	movl a2,a0
+	jmp __mark_node
+
+__mark_record_3_bb:
+	subl $4,a0
+	jmp __mark_next_node
+
+__mark_record_3_aab:
+	movl (a1),a2
+	movl a0,(a1)
+	lea 1(a1),a3
+	movl a2,a0
+	jmp __mark_node
+
+__shared_record_argument_part:
+	movl 4(a0),a1
+
+	popl a3
+
+	test d1,d1
+	jne __shared_argument_part
+	subl $4,a0
+	jmp __mark_next_node
+
+__mark_record_2:
+	movl bit_set_table(a1),a1
+	addl $3,n_marked_words_offset(a4)
+	orl a1,(,d1,4)
+	cmpl $0x20000000,a1
+	jbe fits__in__word_12
+	orl $1,4(,d1,4)
+fits__in__word_12:
+	cmpw $1,-2+2(d0)
+	ja __mark_record_2_c
+	je __shared_argument_part
+	subl $4,a0
+	jmp __mark_next_node
+
+__mark_record_1:
+	cmpw $0,-2+2(d0)
+	jne __mark_hnf_1
+	subl $4,a0
+	jmp __mark_bool_or_small_string
+
+__mark_string_or_array:
+	je __mark_string_
+
+__mark_array:
+	movl 8(a0),a2
+	test a2,a2
+	je __mark_lazy_array
+
+	movzwl -2(a2),d0
+	test d0,d0
+	je __mark_strict_basic_array
+
+	movzwl -2+2(a2),a2
+	test a2,a2
+	je __mark_b_record_array
+
+	subl $256,d0
+	cmpl a2,d0
+	je __mark_a_record_array
+
+/ Mixed record array: reorder so pointer fields are contiguous (undone
+/ by restore_arrays after the mark phase), then treat as pointer array.
+__mark__ab__record__array:
+	pushl a1
+	pushl d1
+	movl a2,d1
+
+	movl 4(a0),a2
+	addl $8,a0
+	pushl a0
+
+	shl $2,a2
+	movl d0,a1
+	imull a2,a1
+
+	subl d1,d0
+	addl $4,a0
+	addl a0,a1
+
+	call reorder
+
+	popl a0
+
+	xchg d1,d0
+	movl -4(a0),a2
+	imull a2,d0
+	imull a2,d1
+	addl d1,n_marked_words_offset(a4)
+	addl d0,d1
+
+	movl neg_heap_p3_offset(a4),a2
+	shl $2,d1
+	addl a0,a2
+	addl d1,a2
+
+	popl d1
+	popl a1
+
+	movl bit_set_table(a1),a1
+	orl a1,(,d1,4)
+
+	lea (a0,d0,4),a1
+	jmp __mark_r_array
+
+__mark_a_record_array:
+	imull 4(a0),d0
+	addl $8,a0
+	jmp __mark_lr_array
+
+__mark_lazy_array:
+	movl 4(a0),d0
+	addl $8,a0
+
+__mark_lr_array:
+	movl bit_set_table(a1),a1
+	movl neg_heap_p3_offset(a4),a2
+	orl a1,(,d1,4)
+	lea (a0,d0,4),a1
+	addl a1,a2
+__mark_r_array:
+	shrl $7,a2
+	addl heap_vector_d4_offset(a4),a2
+
+	cmpl a2,d1
+	jae __skip_mark_lazy_array_bits
+
+	inc d1
+
+__mark_lazy_array_bits:
+	orl $1,(,d1,4)
+	inc d1
+	cmpl a2,d1
+	jbe __mark_lazy_array_bits
+
+__skip_mark_lazy_array_bits:
+	movl n_marked_words_offset(a4),a2
+	addl $3,a2
+	addl d0,a2
+
+	cmpl $1,d0
+	movl a2,n_marked_words_offset(a4)
+	jbe __mark_array_length_0_1
+
+/ Link the array onto lazy_array_list (restored after marking) and
+/ reverse into its last element.
+	movl (a1),a2
+	movl (a0),d1
+	movl d1,(a1)
+	movl a2,(a0)
+
+	movl -4(a1),a2
+	subl $4,a1
+	movl lazy_array_list_offset(a4),d1
+	addl $2,a2
+	movl d1,(a1)
+	movl a2,-4(a0)
+	movl d0,-8(a0)
+	subl $8,a0
+	movl a0,lazy_array_list_offset(a4)
+
+	movl -4(a1),a0
+	movl a3,-4(a1)
+	lea -4(a1),a3
+	jmp __mark_node
+
+__mark_array_length_0_1:
+	lea -8(a0),a0
+	jb __mark_next_node
+
+	movl 12(a0),d1
+	movl 8(a0),a2
+	movl lazy_array_list_offset(a4),a1
+	movl a2,12(a0)
+	movl a1,8(a0)
+	movl d0,(a0)
+	movl a0,lazy_array_list_offset(a4)
+	movl d1,4(a0)
+	addl $4,a0
+
+	movl (a0),a2
+	movl a3,(a0)
+	lea 2(a0),a3
+	movl a2,a0
+	jmp __mark_node
+
+__mark_b_record_array:
+	movl 4(a0),a2
+	subl $256,d0
+	imull a2,d0
+	addl $3,d0
+	jmp __mark_basic_array
+
+__mark_strict_basic_array:
+	movl 4(a0),d0
+	cmpl $INT+2,a2
+	je __mark__strict__int__array
+	cmpl $BOOL+2,a2
+	je __mark__strict__bool__array
+__mark__strict__real__array:
+	addl d0,d0
+__mark__strict__int__array:
+	addl $3,d0
+	jmp __mark_basic_array
+__mark__strict__bool__array:
+	addl $12+3,d0
+	shrl $2,d0
+	jmp __mark_basic_array
+
+__mark_string_:
+	movl 4(a0),d0
+	addl $8+3,d0
+	shr $2,d0
+
+__mark_basic_array:
+	movl bit_set_table(a1),a1
+	addl d0,n_marked_words_offset(a4)
+
+	orl a1,(,d1,4)
+	lea -4(a0,d0,4),d0
+
+	addl neg_heap_p3_offset(a4),d0
+	shrl $7,d0
+	addl heap_vector_d4_offset(a4),d0
+
+	cmpl d0,d1
+	jae __mark_next_node
+
+	incl d1
+	movl $1,a2
+
+	cmpl d0,d1
+	jae __last__string__bits
+
+__mark_string_lp:
+	orl a2,(,d1,4)
+	incl d1
+	cmpl d0,d1
+	jb __mark_string_lp
+
+__last__string__bits:
+	orl a2,(,d1,4)
+	jmp __mark_next_node
diff --git a/thread/istartup.s b/thread/istartup.s
new file mode 100644
index 0000000..63a6376
--- /dev/null
+++ b/thread/istartup.s
@@ -0,0 +1,5165 @@
+/
+/ File: istartup.s
+/ Author: John van Groningen
+/ Machine: Intel 386
+
+#define K6_0 0
+
+#define d0 %eax
+#define d1 %ebx
+#define a0 %ecx
+#define a1 %edx
+#define a2 %ebp
+#define a3 %esi
+#define a4 %edi
+#define sp %esp
+
+#define d0w %ax
+#define d1w %bx
+#define a0w %cx
+#define a1w %dx
+#define a2w %bp
+#define a3w %si
+#define a4w %di
+
+#define d0b %al
+#define d1b %bl
+#define a0b %cl
+#define a1b %dl
+
+#define d0lb %al
+#define d0hb %ah
+#define d1lb %bl
+#define d1hb %bh
+
+#define SHARE_CHAR_INT
+#define MY_ITOS
+#define FINALIZERS
+#undef STACK_OVERFLOW_EXCEPTION_HANDLER
+#define WRITE_HEAP
+
+#undef MEASURE_GC
+#undef DEBUG
+#undef PREFETCH2
+
+#define NO_BIT_INSTRUCTIONS
+#define ADJUST_HEAP_SIZE
+#define MARK_GC
+#define MARK_AND_COPY_GC
+
+#define NEW_DESCRIPTORS
+
+/ #define PROFILE
+#define MODULE_NAMES_IN_TIME_PROFILER
+
+#undef COMPACT_GC_ONLY
+
+#define MINIMUM_HEAP_SIZE 8000
+#define MINIMUM_HEAP_SIZE_2 4000
+
+#if defined(_WINDOWS_) || defined (ELF)
+# define align(n) .align (1<<n)
+#else
+# define align(n) .align n
+#endif
+
+#ifdef OS2
+# define DLL
+# define NOCLIB
+#endif
+
+#ifdef _WINDOWS_
+# define NOCLIB
+#endif
+
+#ifdef LINUX
+# define section(n) .section .text.n,"ax"
+#else
+# define section(n) .text
+#endif
+
+#define DESCRIPTOR_ARITY_OFFSET (-2)
+#ifdef NEW_DESCRIPTORS
+# define ZERO_ARITY_DESCRIPTOR_OFFSET (-4)
+#else
+# define ZERO_ARITY_DESCRIPTOR_OFFSET (-8)
+#endif
+
+ .comm main_thread_local_storage,156
+
+free_heap_offset = 0
+end_heap_offset = 4
+
+int_to_real_scratch_offset = 8
+saved_a_stack_p_offset = 12
+
+heap_p1_offset = 16
+heap_p2_offset = 20
+heap_p3_offset = 24
+heap_vector_offset = 28
+heap_vector_d4_offset = 32
+end_vector_offset = 36
+bit_vector_p_offset = 40
+bit_vector_size_offset = 44
+heap_size_129_offset = 48
+heap_size_33_offset = 52
+heap_size_32_33_offset = 56
+heap_copied_vector_offset = 60
+heap_end_after_gc_offset = 64
+stack_p_offset = 68
+stack_top_offset = 72
+end_stack_offset = 76
+n_marked_words_offset = 80
+n_allocated_words_offset = 84
+n_free_words_after_mark_offset = 88
+bit_counter_offset = 92
+lazy_array_list_offset = 96
+neg_heap_p3_offset = 100
+
+heap_mbp_offset = 104
+stack_mbp_offset = 108
+heap_p_offset = 112
+heap_copied_vector_size_offset = 116
+heap_end_after_copy_gc_offset = 120
+extra_heap_offset = 124
+extra_heap_size_offset = 128
+halt_sp_offset = 132
+
+heap_size_offset = 136
+a_stack_size_offset = 140
+
+garbage_collect_flag_offset = 144
+
+#ifdef MARK_GC
+zero_bits_before_mark_offset = 148
+n_last_heap_free_bytes_offset = 152
+#endif
+
+ .globl tlsp_tls_index
+ .comm tlsp_tls_index,4
+
+ .comm basic_only,4
+#if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+ .comm last_time,8
+ .comm execute_time,8
+ .comm garbage_collect_time,8
+ .comm IO_time,8
+#else
+ .comm last_time,4
+ .comm execute_time,4
+ .comm garbage_collect_time,4
+ .comm IO_time,4
+# ifdef MEASURE_GC
+ .comm compact_garbage_collect_time,4
+ .comm mark_compact_garbage_collect_time,4
+ .comm total_gc_bytes_lo,4
+ .comm total_gc_bytes_hi,4
+ .comm total_compact_gc_bytes_lo,4
+ .comm total_compact_gc_bytes_hi,4
+# endif
+#endif
+
+ .comm dll_initisialised,4
+
+#ifdef WRITE_HEAP
+ .comm heap_end_write_heap,4
+ .comm d3_flag_write_heap,4
+ .comm heap2_begin_and_end,8
+#endif
+
+#ifdef STACK_OVERFLOW_EXCEPTION_HANDLER
+ .comm a_stack_guard_page,4
+#endif
+
+ .globl profile_stack_pointer
+ .comm profile_stack_pointer,4
+
+ .data
+ align (2)
+
+caf_list:
+ .long 0
+ .globl caf_listp
+caf_listp:
+ .long 0
+
+zero_length_string:
+ .long __STRING__+2
+ .long 0
+true_string:
+ .long __STRING__+2
+ .long 4
+true_c_string:
+ .ascii "True"
+ .byte 0,0,0,0
+false_string:
+ .long __STRING__+2
+ .long 5
+false_c_string:
+ .ascii "False"
+ .byte 0,0,0
+file_c_string:
+ .ascii "File"
+ .byte 0,0,0,0
+
+ .comm sprintf_buffer,32
+
+out_of_memory_string_1:
+ .ascii "Not enough memory to allocate heap and stack"
+ .byte 10,0
+printf_int_string:
+ .ascii "%d"
+ .byte 0
+printf_real_string:
+ .ascii "%.15g"
+ .byte 0
+printf_string_string:
+ .ascii "%s"
+ .byte 0
+printf_char_string:
+ .ascii "%c"
+ .byte 0
+garbage_collect_string_1:
+ .asciz "A stack: "
+garbage_collect_string_2:
+ .asciz " bytes. BC stack: "
+garbage_collect_string_3:
+ .ascii " bytes."
+ .byte 10,0
+heap_use_after_gc_string_1:
+ .ascii "Heap use after garbage collection: "
+ .byte 0
+heap_use_after_gc_string_2:
+ .ascii " Bytes."
+ .byte 10,0
+stack_overflow_string:
+ .ascii "Stack overflow."
+ .byte 10,0
+out_of_memory_string_4:
+ .ascii "Heap full."
+ .byte 10,0
+time_string_1:
+ .ascii "Execution: "
+ .byte 0
+time_string_2:
+ .ascii " Garbage collection: "
+ .byte 0
+#ifdef MEASURE_GC
+time_string_3:
+ .ascii " "
+ .byte 0
+#endif
+time_string_4:
+ .ascii " Total: "
+ .byte 0
+high_index_string:
+ .ascii "Index too high in UPDATE string."
+ .byte 10,0
+low_index_string:
+ .ascii "Index negative in UPDATE string."
+ .byte 10,0
+IO_error_string:
+ .ascii "IO error: "
+ .byte 0
+new_line_string:
+ .byte 10,0
+
+sprintf_time_string:
+ .ascii "%d.%02d"
+ .byte 0
+
+#ifdef MARK_GC
+marked_gc_string_1:
+ .ascii "Marked: "
+ .byte 0
+#endif
+tls_alloc_error_string:
+ .ascii "Could not allocate thread local storage index"
+ .byte 10,0
+#ifdef PROFILE
+ align (2)
+# ifdef MODULE_NAMES_IN_TIME_PROFILER
+# ifdef LINUX
+ .globl m_system
+# endif
+m_system:
+ .long 6
+ .ascii "System"
+ .byte 0
+ .byte 0
+ .long m_system
+
+# endif
+garbage_collector_name:
+ .long 0
+ .asciz "garbage_collector"
+ align (2)
+#endif
+
+
+#ifdef NOCLIB
+ align (3)
+NAN_real:
+ .long 0xffffffff,0x7fffffff
+one_real:
+ .long 0x00000000,0x3ff00000
+zero_real:
+ .long 0x00000000,0x00000000
+#endif
+
+#ifdef NO_BIT_INSTRUCTIONS
+ align (2)
+bit_set_table:
+ .long 0x00000001,0x00000002,0x00000004,0x00000008
+ .long 0x00000010,0x00000020,0x00000040,0x00000080
+ .long 0x00000100,0x00000200,0x00000400,0x00000800
+ .long 0x00001000,0x00002000,0x00004000,0x00008000
+ .long 0x00010000,0x00020000,0x00040000,0x00080000
+ .long 0x00100000,0x00200000,0x00400000,0x00800000
+ .long 0x01000000,0x02000000,0x04000000,0x08000000
+ .long 0x10000000,0x20000000,0x40000000,0x80000000
+ .long 0
+bit_clear_table:
+ .long 0xfffffffe,0xfffffffd,0xfffffffb,0xfffffff7
+ .long 0xffffffef,0xffffffdf,0xffffffbf,0xffffff7f
+ .long 0xfffffeff,0xfffffdff,0xfffffbff,0xfffff7ff
+ .long 0xffffefff,0xffffdfff,0xffffbfff,0xffff7fff
+ .long 0xfffeffff,0xfffdffff,0xfffbffff,0xfff7ffff
+ .long 0xffefffff,0xffdfffff,0xffbfffff,0xff7fffff
+ .long 0xfeffffff,0xfdffffff,0xfbffffff,0xf7ffffff
+ .long 0xefffffff,0xdfffffff,0xbfffffff,0x7fffffff
+ .long 0xffffffff
+first_one_bit_table:
+ .byte -1,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+ .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+#endif
+
+ align (2)
+ .comm sprintf_time_buffer,20
+
+ align (2)
+#ifdef SHARE_CHAR_INT
+ .globl small_integers
+ .comm small_integers,33*8
+ .globl static_characters
+ .comm static_characters,256*8
+#endif
+
+ .text
+
+ .globl @abc_main
+ .globl print
+ .globl print_char
+ .globl print_int
+ .globl print_real
+ .globl print__string__
+ .globl print__chars__sc
+ .globl print_sc
+ .globl print_symbol
+ .globl print_symbol_sc
+ .globl printD
+ .globl DtoAC
+ .globl push_t_r_args
+ .globl push_a_r_args
+ .globl halt
+ .globl dump
+
+ .globl catAC
+ .globl sliceAC
+ .globl updateAC
+ .globl eqAC
+ .globl cmpAC
+
+ .globl string_to_string_node
+ .globl int_array_to_node
+ .globl real_array_to_node
+
+ .globl _create_arrayB
+ .globl _create_arrayC
+ .globl _create_arrayI
+ .globl _create_arrayR
+ .globl _create_r_array
+ .globl create_array
+ .globl create_arrayB
+ .globl create_arrayC
+ .globl create_arrayI
+ .globl create_arrayR
+ .globl create_R_array
+
+ .globl BtoAC
+ .globl ItoAC
+ .globl RtoAC
+ .globl eqD
+
+ .globl collect_0
+ .globl collect_1
+ .globl collect_2
+
+ .globl collect_0l
+ .globl collect_1l
+ .globl collect_2l
+
+ .globl yet_args_needed
+ .globl yet_args_needed_0
+ .globl yet_args_needed_1
+ .globl yet_args_needed_2
+ .globl yet_args_needed_3
+ .globl yet_args_needed_4
+
+ .globl _c3,_c4,_c5,_c6,_c7,_c8,_c9,_c10,_c11,_c12
+ .globl _c13,_c14,_c15,_c16,_c17,_c18,_c19,_c20,_c21,_c22
+ .globl _c23,_c24,_c25,_c26,_c27,_c28,_c29,_c30,_c31,_c32
+
+ .globl e__system__nind
+ .globl e__system__eaind
+/ old names of the previous two labels for compatibility, remove later
+ .globl __indirection,__eaind
+ .globl e__system__dind
+ .globl eval_fill
+
+ .globl eval_upd_0,eval_upd_1,eval_upd_2,eval_upd_3,eval_upd_4
+ .globl eval_upd_5,eval_upd_6,eval_upd_7,eval_upd_8,eval_upd_9
+ .globl eval_upd_10,eval_upd_11,eval_upd_12,eval_upd_13,eval_upd_14
+ .globl eval_upd_15,eval_upd_16,eval_upd_17,eval_upd_18,eval_upd_19
+ .globl eval_upd_20,eval_upd_21,eval_upd_22,eval_upd_23,eval_upd_24
+ .globl eval_upd_25,eval_upd_26,eval_upd_27,eval_upd_28,eval_upd_29
+ .globl eval_upd_30,eval_upd_31,eval_upd_32
+
+ .globl repl_args_b
+ .globl push_arg_b
+ .globl del_args
+#if 0
+ .globl o__S_P2
+ .globl ea__S_P2
+#endif
+ .globl add_IO_time
+ .globl add_execute_time
+ .globl @IO_error
+ .globl stack_overflow
+
+ .globl out_of_memory_4
+ .globl print_error
+
+ .global _start
+
+ .globl tan_real
+ .globl asin_real
+ .globl acos_real
+ .globl atan_real
+ .globl ln_real
+ .globl log10_real
+ .globl exp_real
+ .globl pow_real
+ .globl r_to_i_real
+ .globl truncate_real
+ .globl entier_real
+ .globl ceiling_real
+ .globl round__real64
+ .globl truncate__real64
+ .globl entier__real64
+ .globl ceiling__real64
+ .globl int64a__to__real
+
+#ifdef NOCLIB
+ .globl @c_pow
+ .globl @c_log10
+ .globl @c_entier
+#endif
+#ifdef PROFILE
+ .globl init_profiler
+ .globl profile_s,profile_n,profile_r,profile_t
+ .globl write_profile_information,write_profile_stack
+#endif
+ .globl __driver
+
+/ from system.abc:
+ .global INT
+ .global CHAR
+ .global BOOL
+ .global REAL
+ .global FILE
+ .global __STRING__
+ .global __ARRAY__
+ .global __cycle__in__spine
+ .global __print__graph
+ .global __eval__to__nf
+
+/ from wcon.c:
+ .globl @w_print_char
+ .globl @w_print_string
+ .globl @w_print_text
+ .globl @w_print_int
+ .globl @w_print_real
+
+ .globl @ew_print_char
+ .globl @ew_print_text
+ .globl @ew_print_string
+ .globl @ew_print_int
+
+ .global @ab_stack_size
+ .global @heap_size
+ .global @flags
+
+/ from standard c library:
+#ifdef USE_CLIB
+ .globl @malloc
+ .globl @free
+ .globl @sprintf
+ .globl @strlen
+#else
+ .globl @allocate_memory
+# ifdef STACK_OVERFLOW_EXCEPTION_HANDLER
+ .globl @allocate_memory_with_guard_page_at_end
+# endif
+ .globl @free_memory
+#endif
+
+#ifdef ADJUST_HEAP_SIZE
+ .global @heap_size_multiple
+ .global @initial_heap_size
+#endif
+#ifdef WRITE_HEAP
+ .global @min_write_heap_size
+#endif
+#ifdef FINALIZERS
+ .global __Nil
+ .globl finalizer_list
+ .comm finalizer_list,4
+ .globl free_finalizer_list
+ .comm free_finalizer_list,4
+#endif
+
+/ Runtime entry point, called from C startup code.
+/ Saves the C callee-saved registers, clears the x87 register stack,
+/ initialises the heap, stacks and timer, then runs the generated
+/ program via _start (__start on ELF).  Returns to the C caller on
+/ initialisation failure or after the program finished (exit_clean).
+@abc_main:
+ push d1
+ push a0
+ push a1
+ push a2
+ push a3
+ push a4
+
+/ reset the FPU and fill all 8 x87 slots with 0.0 -- presumably so
+/ the real-value register stack starts in a defined state; confirm
+ finit
+ fldz
+ fldz
+ fldz
+ fldz
+ fldz
+ fldz
+ fldz
+
+/ init_clean returns non-zero in %eax on failure
+ call init_clean
+ test %eax,%eax
+ jne init_error
+
+ call init_timer
+
+/ remember the C stack pointer so halt can unwind back to it
+ mov sp,halt_sp_offset(a4)
+
+#ifdef PROFILE
+ call init_profiler
+#endif
+
+#ifdef ELF
+ call __start
+#else
+ call _start
+#endif
+
+exit:
+ call exit_clean
+
+init_error:
+/ restore the C callee-saved registers and return to the C caller
+ pop a4
+ pop a3
+ pop a2
+ pop a1
+ pop a0
+ pop d1
+ ret
+
+#if defined (_WINDOWS_) || defined (LINUX)
+# ifdef _WINDOWS_
+/ DllMain(hinst,reason,reserved): dispatch on the reason argument at
+/ 8(sp); 1 = DLL_PROCESS_ATTACH, 0 = DLL_PROCESS_DETACH, any other
+/ reason is ignored.  stdcall: ret $12 pops the three arguments.
+ .globl @DllMain?12
+@DllMain?12:
+ cmpl $1,8(sp)
+ je DLL_PROCESS_ATTACH
+ jb DLL_PROCESS_DETACH
+ ret $12
+
+DLL_PROCESS_ATTACH:
+# else
+/ clean_init: initialisation entry when the runtime is built as a
+/ shared library; same work as @abc_main up to (but not including)
+/ running the program.  Returns 1 in %eax on success, 0 on failure.
+ .globl clean_init
+clean_init:
+# endif
+ push d1
+ push a0
+ push a1
+ push a2
+ push a3
+ push a4
+
+ movl $1,dll_initisialised
+
+ call init_clean
+ test %eax,%eax
+ jne init_dll_error
+
+ call init_timer
+
+ mov sp,halt_sp_offset(a4)
+
+# ifdef PROFILE
+ call init_profiler
+# endif
+
+/ save the A-stack pointer for the lifetime of the host application
+ mov %esi,saved_a_stack_p_offset(a4)
+
+ movl $1,%eax
+ jmp exit_dll_init
+
+init_dll_error:
+ xor %eax,%eax
+ jmp exit_dll_init
+# ifdef _WINDOWS_
+DLL_PROCESS_DETACH:
+# else
+/ clean_fini: shut the runtime down again (frees heap and stacks)
+ .globl clean_fini
+clean_fini:
+# endif
+ push d1
+ push a0
+ push a1
+ push a2
+ push a3
+ push a4
+
+/ restore the A-stack pointer saved at attach/init time
+ mov saved_a_stack_p_offset(a4),%esi
+
+ call exit_clean
+
+exit_dll_init:
+ pop a4
+ pop a3
+ pop a2
+ pop a1
+ pop a0
+ pop d1
+# ifdef _WINDOWS_
+ ret $12
+# else
+ ret
+# endif
+#endif
+
+ .globl @TlsAlloc?0
+
+/ init_clean: allocate and lay out the heap, the A/BC stack and the
+/ thread-local storage block; build the shared INT/CHAR node tables.
+/ Returns 0 in %eax on success, non-zero on failure (after printing
+/ an error message).
+init_clean:
+ call @TlsAlloc?0
+
+/ only indexes < 64 can be written directly into the Windows TEB
+/ TlsSlots array at %fs:0xe10 below
+ cmp $64,d0
+ jae tls_alloc_error
+
+ movl d0,tlsp_tls_index
+
+ lea main_thread_local_storage,a4
+
+/ store the TLS block of the main thread in its TEB slot
+ movl a4,%fs:0x0e10(,d0,4)
+
+/ flag bit 1: print basic values only
+ mov @flags,d0
+ andl $1,d0
+ mov d0,basic_only
+
+/ round the heap size up to a multiple of 8 bytes
+ movl @heap_size,d0
+ add $7,d0
+ andl $-8,d0
+ movl d0,@heap_size
+ movl d0,heap_size_offset(a4)
+#ifdef PREFETCH2
+ sub $63,d0
+#else
+ sub $3,d0
+#endif
+/ heap_size/33: space for the mark bit vector (1 bit per heap word)
+ xorl a1,a1
+ mov $33,d1
+ div d1
+ movl d0,heap_size_33_offset(a4)
+
+/ heap_size/129: size of the copied-node vector of the copying gc
+ movl heap_size_offset(a4),d0
+ sub $3,d0
+ xorl a1,a1
+ mov $129,d1
+ div d1
+ mov d0,heap_size_129_offset(a4)
+ add $3,d0
+ andl $-4,d0
+ movl d0,heap_copied_vector_size_offset(a4)
+ movl $0,heap_end_after_copy_gc_offset(a4)
+
+ movl heap_size_offset(a4),d0
+ add $7,d0
+
+ push d0
+#ifdef USE_CLIB
+ call @malloc
+#else
+ call @allocate_memory
+#endif
+ add $4,sp
+
+ test d0,d0
+ je no_memory_2
+
+/ keep the raw malloc pointer for free(); align the heap to 4 bytes
+ mov d0,heap_mbp_offset(a4)
+ addl $3,d0
+ and $-4,d0
+ mov d0,free_heap_offset(a4)
+ mov d0,heap_p_offset(a4)
+
+ mov @ab_stack_size,a2
+ movl a2,a_stack_size_offset(a4)
+ add $3,a2
+
+ push a2
+#ifdef STACK_OVERFLOW_EXCEPTION_HANDLER
+ call @allocate_memory_with_guard_page_at_end
+#else
+# ifdef USE_CLIB
+ call @malloc
+# else
+ call @allocate_memory
+# endif
+#endif
+ add $4,sp
+
+ test d0,d0
+ je no_memory_3
+
+ mov d0,stack_mbp_offset(a4)
+#ifdef STACK_OVERFLOW_EXCEPTION_HANDLER
+/ record the page-aligned end of the A-stack as the guard page
+ addl a_stack_size_offset(a4),d0
+ addl $3+4095,d0
+ andl $-4096,d0
+ movl d0,a_stack_guard_page
+ subl a_stack_size_offset(a4),d0
+#endif
+ add $3,d0
+ andl $-4,d0
+
+/ a3 = A-stack pointer
+ mov d0,a3
+ mov d0,stack_p_offset(a4)
+
+#ifdef SHARE_CHAR_INT
+/ build the tables of preallocated INT nodes 0..32 and CHAR nodes
+/ 0..255 that small integers and characters are shared from
+ leal small_integers,a0
+ xorl d0,d0
+ leal INT+2,d1
+
+make_small_integers_lp:
+ mov d1,(a0)
+ mov d0,4(a0)
+ inc d0
+ add $8,a0
+ cmp $33,d0
+ jne make_small_integers_lp
+
+ leal static_characters,a0
+ xorl d0,d0
+ leal CHAR+2,d1
+
+make_static_characters_lp:
+ mov d1,(a0)
+ mov d0,4(a0)
+ inc d0
+ add $8,a0
+ cmp $256,d0
+ jne make_static_characters_lp
+#endif
+
+ lea caf_list+4,a0
+ movl a0,caf_listp
+
+#ifdef FINALIZERS
+/ both finalizer lists start out empty (__Nil sentinel)
+ movl $__Nil-4,finalizer_list
+ movl $__Nil-4,free_finalizer_list
+#endif
+
+ mov free_heap_offset(a4),a1
+ mov a1,heap_p1_offset(a4)
+
+/ heap layout: semispace 1, copied-node vector, semispace 2
+ movl heap_size_129_offset(a4),a2
+ shl $4,a2
+ lea (a1,a2,4),d0
+ mov d0,heap_copied_vector_offset(a4)
+ add heap_copied_vector_size_offset(a4),d0
+ mov d0,heap_p2_offset(a4)
+
+ movb $0,garbage_collect_flag_offset(a4)
+
+# ifdef MARK_AND_COPY_GC
+/ flag bit 64 selects the copying collector at runtime
+ testb $64,@flags
+ je no_mark1
+# endif
+
+# if defined (MARK_GC) || defined (COMPACT_GC_ONLY)
+/ mark/compact layout: mark bit vector first, then the heap proper
+ movl heap_size_33_offset(a4),d0
+ movl a1,heap_vector_offset(a4)
+ addl d0,a1
+# ifdef PREFETCH2
+ addl $63,a1
+ andl $-64,a1
+# else
+ addl $3,a1
+ andl $-4,a1
+# endif
+ movl a1,free_heap_offset(a4)
+ movl a1,heap_p3_offset(a4)
+/ a2 = heap size in words (8 words per bit-vector byte)
+ lea (,d0,8),a2
+ movb $-1,garbage_collect_flag_offset(a4)
+# endif
+
+# ifdef MARK_AND_COPY_GC
+no_mark1:
+# endif
+
+# ifdef ADJUST_HEAP_SIZE
+/ optionally start with a smaller initial heap that is grown later
+ movl @initial_heap_size,d0
+# ifdef MARK_AND_COPY_GC
+ movl $(MINIMUM_HEAP_SIZE_2),d1
+ testb $64,@flags
+ jne no_mark9
+ addl d1,d1
+no_mark9:
+# else
+# if defined (MARK_GC) || defined (COMPACT_GC_ONLY)
+ movl $(MINIMUM_HEAP_SIZE),d1
+# else
+ movl $(MINIMUM_HEAP_SIZE_2),d1
+# endif
+# endif
+
+ cmpl d1,d0
+ jle too_large_or_too_small
+ shr $2,d0
+ cmpl a2,d0
+ jge too_large_or_too_small
+ movl d0,a2
+too_large_or_too_small:
+# endif
+
+/ end_heap is kept 32 bytes below the real end as allocation slack
+ lea (a1,a2,4),d0
+ mov d0,heap_end_after_gc_offset(a4)
+ subl $32,d0
+ movl d0,end_heap_offset(a4)
+
+# ifdef MARK_AND_COPY_GC
+ testb $64,@flags
+ je no_mark2
+# endif
+
+# if defined (MARK_GC) && defined (ADJUST_HEAP_SIZE)
+ movl a2,bit_vector_size_offset(a4)
+# endif
+
+# ifdef MARK_AND_COPY_GC
+no_mark2:
+# endif
+
+/ success
+ xor %eax,%eax
+ ret
+
+/ init_clean failure paths: print a message and return 1 in %eax
+tls_alloc_error:
+ push $tls_alloc_error_string
+ call @ew_print_string
+ add $4,sp
+#ifdef _WINDOWS_
+ movl $1,@execution_aborted
+#endif
+
+ movl $1,%eax
+ ret
+
+no_memory_2:
+/ heap allocation failed
+ push $out_of_memory_string_1
+ call @ew_print_string
+ add $4,sp
+#ifdef _WINDOWS_
+ movl $1,@execution_aborted
+#endif
+ movl $1,%eax
+ ret
+
+no_memory_3:
+/ stack allocation failed: release the already-allocated heap too
+ push $out_of_memory_string_1
+ call @ew_print_string
+ add $4,sp
+#ifdef _WINDOWS_
+ movl $1,@execution_aborted
+#endif
+
+ push heap_mbp_offset(a4)
+#ifdef USE_CLIB
+ call @free
+#else
+ call @free_memory
+#endif
+ add $4,sp
+
+ movl $1,%eax
+ ret
+
+/ exit_clean: account the final slice of execution time, optionally
+/ (flag bit 8) print the execution/gc/total times, release the stack
+/ and heap, and write profiling output when built with PROFILE.
+exit_clean:
+ call add_execute_time
+
+ mov @flags,d0
+ testb $8,d0b
+ je no_print_execution_time
+
+ push $time_string_1
+ call @ew_print_string
+ add $4,sp
+
+/ unix keeps (seconds,microseconds) pairs; the other targets keep a
+/ single millisecond count, so d1 is only loaded on unix
+ mov execute_time,d0
+#if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+ mov execute_time+4,d1
+#endif
+ call print_time
+
+ push $time_string_2
+ call @ew_print_string
+ add $4,sp
+
+ mov garbage_collect_time,d0
+#if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+ mov garbage_collect_time+4,d1
+#endif
+ call print_time
+
+#ifdef MEASURE_GC
+ push $time_string_3
+ call @ew_print_string
+ add $4,sp
+
+/ the MEASURE_GC counters are 4-byte cells on OS2/Windows/ELF, so
+/ the +4 load must be excluded on ELF as well (fix: was missing
+/ the ELF test, reading past the 4-byte .comm)
+ mov mark_compact_garbage_collect_time,d0
+# if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+ mov mark_compact_garbage_collect_time+4,d1
+# endif
+ call print_time
+
+ push $time_string_3
+ call @ew_print_string
+ add $4,sp
+
+ mov compact_garbage_collect_time,d0
+# if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+ mov compact_garbage_collect_time+4,d1
+# endif
+ call print_time
+#endif
+
+ push $time_string_4
+ call @ew_print_string
+ add $4,sp
+
+#if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+/ total = execute + gc + IO, with manual microsecond carry
+ mov execute_time,d0
+ mov execute_time+4,d1
+
+ add garbage_collect_time,d0
+ add garbage_collect_time+4,d1
+ cmp $1000000,d1
+ jb no_usec_overflow_1
+ sub $1000000,d1
+ inc d0
+no_usec_overflow_1:
+
+ add IO_time,d0
+ add IO_time+4,d1
+ cmp $1000000,d1
+ jb no_usec_overflow_2
+ sub $1000000,d1
+ inc d0
+no_usec_overflow_2:
+#else
+ mov execute_time,d0
+ add garbage_collect_time,d0
+ add IO_time,d0
+# ifdef MEASURE_GC
+ add mark_compact_garbage_collect_time,d0
+ add compact_garbage_collect_time,d0
+# endif
+#endif
+
+ call print_time
+
+#ifdef MEASURE_GC
+/ print the raw byte counters of both collectors as "hi:lo hi:lo"
+ push $10
+ call @ew_print_char
+ addl $4,sp
+
+ pushl total_gc_bytes_hi
+ call @ew_print_int
+ addl $4,sp
+
+ push $':'
+ call @ew_print_char
+ addl $4,sp
+
+ pushl total_gc_bytes_lo
+ call @ew_print_int
+ addl $4,sp
+
+ push $32
+ call @ew_print_char
+ addl $4,sp
+
+ pushl total_compact_gc_bytes_hi
+ call @ew_print_int
+ addl $4,sp
+
+ push $':'
+ call @ew_print_char
+ addl $4,sp
+
+ pushl total_compact_gc_bytes_lo
+ call @ew_print_int
+ addl $4,sp
+#endif
+
+ push $10
+ call @ew_print_char
+ add $4,sp
+
+no_print_execution_time:
+/ release the A/BC stack and the heap
+ push stack_mbp_offset(a4)
+#ifdef USE_CLIB
+ call @free
+#else
+ call @free_memory
+#endif
+ add $4,sp
+
+ push heap_mbp_offset(a4)
+#ifdef USE_CLIB
+ call @free
+#else
+ call @free_memory
+#endif
+ add $4,sp
+
+#ifdef PROFILE
+ call write_profile_information
+#endif
+
+ ret
+
+/ __driver: select the top-level driver for the program result.
+/ Flag bit 16 set: evaluate to normal form without printing;
+/ clear: print the result graph.
+__driver:
+ mov @flags,a2
+ test $16,a2
+ jne __eval__to__nf
+ jmp __print__graph
+
+
+/ print_time: print a time value as "seconds.hundredths".
+/ Unix variant: d0 = seconds, d1 = microseconds.
+/ OS2/Windows/ELF variant: d0 = milliseconds.
+print_time:
+#if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+/ a0 = seconds; d0 = microseconds / 10000 = hundredths
+ mov d0,a0
+ xorl a1,a1
+ mov d1,d0
+ mov $10000,d1
+ div d1
+#else
+/ split milliseconds: a0 = seconds, d0 = hundredths
+ xorl a1,a1
+ movl $1000,d1
+ div d1
+ movl d0,a0
+ movl a1,d0
+ xorl a1,a1
+ movl $10,d1
+ div d1
+#endif
+ push d0
+ push a0
+
+#ifdef USE_CLIB
+ push $sprintf_time_string
+ push $sprintf_time_buffer
+ call @sprintf
+ add $16,sp
+
+ push $sprintf_time_buffer
+ call @ew_print_string
+ add $4,sp
+#else
+/ print the seconds, then build ".dd" by hand and print those 3 chars
+ call @ew_print_int
+
+ add $4,sp
+ movl $sprintf_time_buffer,a0
+
+ xorl a1,a1
+ mov $10,d1
+
+/ movb $'.',(a0)
+ movb $46,(a0)
+ pop d0
+
+/ d0 = tens digit, a1 = units digit of the hundredths
+ div d1
+ add $48,d0
+ add $48,a1
+ movb d0b,1(a0)
+ movb a1b,2(a0)
+
+ push $3
+ push a0
+ call @ew_print_text
+ add $8,sp
+#endif
+ ret
+
+/ print_sc: print the C string in d0 unless the basic_only flag is
+/ set; print: print it unconditionally
+print_sc:
+ mov basic_only,a2
+ test a2,a2
+ jne end_print
+
+print:
+ push d0
+ call @w_print_string
+ add $4,sp
+
+end_print:
+ ret
+
+/ dump: print the C string in d0, then abort execution via halt
+dump:
+ call print
+ jmp halt
+
+/ printD: print the name of the descriptor in d0.  If bit 1 of d0 is
+/ set it is a tagged descriptor pointer (handled by printD_ below),
+/ otherwise d0 already points at a (length,chars) name string.
+printD: testb $2,d0b
+ jne printD_
+
+ mov d0,a2
+ jmp print_string_a2
+
+/ records (arity field >= 256) store a pointer to the name string in
+/ the descriptor itself
+DtoAC_record:
+#ifdef NEW_DESCRIPTORS
+ movl -6(d0),a2
+#else
+ movl -4(a2),a2
+#endif
+ jmp DtoAC_string_a2
+
+/ DtoAC: convert the descriptor in d0 to a Clean string node in a0
+DtoAC: testb $2,d0b
+ jne DtoAC_
+
+ mov d0,a2
+ jmp DtoAC_string_a2
+
+DtoAC_:
+#ifdef NEW_DESCRIPTORS
+ cmpw $256,-2(d0)
+ jae DtoAC_record
+
+/ the name string follows the descriptor; (d0) holds the distance --
+/ NOTE(review): layout inferred from the matching code in printD_
+ movzwl (d0),d1
+ lea 10(d0,d1),a2
+#else
+/ old descriptor layout: step back over the arity entries to reach
+/ the name string
+ movswl -2(d0),d1
+ lea -2(d0),a2
+ cmp $256,d1
+ jae DtoAC_record
+
+ shl $3,d1
+ sub d1,a2
+
+ movzwl DESCRIPTOR_ARITY_OFFSET(a2),d1
+ lea 4(a2,d1,8),a2
+#endif
+
+DtoAC_string_a2:
+/ a2 points at a (length,chars) pair: build a heap string node
+ movl (a2),d0
+ lea 4(a2),a0
+ jmp build_string
+
+/ print_symbol: print the node in a0.  d1 = basic_only mode flag:
+/ 0 = full output (chars quoted, non-basic symbols printed by name),
+/ non-zero = basic values only.
+print_symbol:
+ xorl d1,d1
+ jmp print_symbol_2
+
+print_symbol_sc:
+ mov basic_only,d1
+print_symbol_2:
+ mov (a0),d0
+
+ cmp $INT+2,d0
+ je print_int_node
+
+ cmp $CHAR+2,d0
+ je print_char_denotation
+
+ cmp $BOOL+2,d0
+ je print_bool
+
+ cmp $REAL+2,d0
+ je print_real_node
+
+/ non-basic node: in basic_only mode print nothing
+ test d1,d1
+ jne end_print_symbol
+
+/ print the descriptor name (shared with printD; d0 = descriptor)
+printD_:
+#ifdef NEW_DESCRIPTORS
+ cmpw $256,-2(d0)
+ jae print_record
+
+ movzwl (d0),d1
+ lea 10(d0,d1),a2
+ jmp print_string_a2
+
+print_record:
+ movl -6(d0),a2
+ jmp print_string_a2
+#else
+/ old descriptor layout: step back over the arity entries
+ movswl -2(d0),d1
+ lea -2(d0),a2
+ cmp $256,d1
+ jae no_print_record
+
+ shl $3,d1
+ sub d1,a2
+
+ movzwl DESCRIPTOR_ARITY_OFFSET(a2),d1
+ lea 4(a2,d1,8),a2
+ jmp print_string_a2
+
+no_print_record:
+ mov -4(a2),a2
+ jmp print_string_a2
+#endif
+
+end_print_symbol:
+ ret
+
+/ --- printers for the basic types ---
+/ *_node entries take a node pointer in a0; the others take the
+/ value itself in d0 (reals: on the x87 stack)
+print_int_node:
+ push 4(a0)
+ call @w_print_int
+ add $4,sp
+ ret
+
+print_int:
+ push d0
+ call @w_print_int
+ add $4,sp
+ ret
+
+print_char_denotation:
+/ in basic_only mode (d1 != 0) print the bare character, otherwise
+/ surround it with single quotes (0x27)
+ test d1,d1
+ jne print_char_node
+
+ push 4(a0)
+
+ push $0x27
+ call @w_print_char
+ add $4,sp
+
+ call @w_print_char
+ add $4,sp
+
+ push $0x27
+ call @w_print_char
+ add $4,sp
+
+ ret
+
+print_char_node:
+ push 4(a0)
+ call @w_print_char
+ add $4,sp
+ ret
+
+print_char:
+ push d0
+ call @w_print_char
+ add $4,sp
+ ret
+
+print_bool:
+ movsbl 4(a0),a0
+ test a0,a0
+ je print_false
+
+print_true:
+ push $true_c_string
+ call @w_print_string
+ add $4,sp
+ ret
+
+print_false:
+ push $false_c_string
+ call @w_print_string
+ add $4,sp
+ ret
+
+print_real:
+/ pass the 8-byte real on the C stack; free the whole x87 register
+/ stack before calling out to C
+ subl $8,sp
+ fstpl 0(sp)
+ jmp print_real_
+print_real_node:
+ push 8(a0)
+ push 4(a0)
+print_real_:
+ ffree %st(0)
+ ffree %st(1)
+ ffree %st(2)
+ ffree %st(3)
+ ffree %st(4)
+ ffree %st(5)
+ ffree %st(6)
+ ffree %st(7)
+ call @w_print_real
+ add $8,sp
+ ret
+
+print_string_a2:
+/ print a (length,chars) pair pointed to by a2
+ add $4,a2
+ push -4(a2)
+ push a2
+ call @w_print_text
+ add $8,sp
+ ret
+
+/ print the characters of the string node in a0 (length at 4(a0),
+/ data at 8(a0)); the _sc entry does nothing when basic_only is set
+print__chars__sc:
+ mov basic_only,a2
+ test a2,a2
+ jne no_print_chars
+
+print__string__:
+ push 4(a0)
+ lea 8(a0),a2
+ push a2
+ call @w_print_text
+ add $8,sp
+no_print_chars:
+ ret
+
+/ push_a_r_args: push the fields of one element of an array of
+/ unboxed records: pointer fields on the A-stack (a3), basic fields
+/ on the B-stack (sp); returns the element record descriptor in d0.
+/ On entry: a0 = array node, d0 = element index.
+/ NOTE(review): element-descriptor layout (arity at offset 0 as
+/ arity+256, pointer-field count at offset 2) inferred from the
+/ matching code in push_t_r_args -- confirm with the code generator.
+push_a_r_args:
+ pushl a4
+
+/ a4 = total record arity, d1 = number of pointer (A) fields,
+/ a1 = address of the descriptor body (saved for the return value)
+ movl 8(a0),a1
+ subl $2,a1
+ movzwl (a1),a4
+ subl $256,a4
+ movzwl 2(a1),d1
+ addl $4,a1
+ pushl a1
+
+/ a1 = number of basic (B) fields
+ movl a4,a1
+ subl d1,a1
+
+/ advance a0 to the end of the selected element: the element offset
+/ index*arity*4 is computed by adding index*4 arity times
+ shl $2,d0
+ lea 12(a0,d1,4),a0
+ dec a4
+mul_array_size_lp:
+ addl d0,a0
+ subl $1,a4
+ jnc mul_array_size_lp
+
+ lea (a0,a1,4),a4
+ jmp push_a_elements
+/ copy the d1 pointer fields (highest address first) to the A-stack
+push_a_elements_lp:
+ movl -4(a0),d0
+ subl $4,a0
+ movl d0,(a3)
+ addl $4,a3
+push_a_elements:
+ subl $1,d1
+ jnc push_a_elements_lp
+
+/ restore: d0 = element descriptor, a4 = saved a4,
+/ a2 = our return address (popped so the pushes below stay on top)
+ movl a4,a0
+ popl d0
+ popl a4
+
+ popl a2
+ jmp push_b_elements
+/ push the a1 basic fields (highest address first) on the B-stack
+push_b_elements_lp:
+ pushl -4(a0)
+ subl $4,a0
+push_b_elements:
+ subl $1,a1
+ jnc push_b_elements_lp
+
+ jmp *a2
+
+/ push_t_r_args: unpack the record node pointed to by the word at
+/ (a0): push its basic fields on the B-stack and its pointer fields
+/ on the A-stack; (a3) receives the record descriptor body and
+/ 4(a3) the number of pointer fields; d0 returns the record arity
+/ counter.  Records with arity > 2 keep the fields after the first
+/ in a separate block referenced from 4(a0).
+/ NOTE(review): node layout inferred from the small_record test --
+/ confirm with the code generator.
+push_t_r_args:
+ popl a2
+
+/ d0 = record arity, d1 = number of pointer fields,
+/ a1 = descriptor body address
+ movl (a0),a1
+ addl $4,a0
+ subl $2,a1
+ movzwl (a1),d0
+ subl $256,d0
+ movzwl 2(a1),d1
+ addl $4,a1
+
+ movl a1,(a3)
+ movl d1,4(a3)
+
+/ d1 = number of basic fields
+ subl d0,d1
+ negl d1
+
+/ a1 = end of the argument area: in the node itself for arity <= 2,
+/ otherwise in the overflow block at 4(a0)
+ lea (a0,d0,4),a1
+ cmpl $2,d0
+ jbe small_record
+ movl 4(a0),a1
+ lea -4(a1,d0,4),a1
+small_record:
+ jmp push_r_b_elements
+
+/ push the basic fields; the record's first field lives at (a0)
+/ while the rest are taken from a1 downwards
+push_r_b_elements_lp:
+ dec d0
+ jne not_first_arg_b
+
+ pushl (a0)
+ jmp push_r_b_elements
+not_first_arg_b:
+ pushl -4(a1)
+ subl $4,a1
+push_r_b_elements:
+ subl $1,d1
+ jnc push_r_b_elements_lp
+
+/ then copy the pointer fields to the A-stack, same first-field rule
+ movl 4(a3),d1
+ pushl a2
+ pushl (a3)
+ jmp push_r_a_elements
+
+push_r_a_elements_lp:
+ dec d0
+ jne not_first_arg_a
+
+ movl (a0),a2
+ movl a2,(a3)
+ addl $4,a3
+ jmp push_r_a_elements
+not_first_arg_a:
+ movl -4(a1),a2
+ subl $4,a1
+ movl a2,(a3)
+ addl $4,a3
+push_r_a_elements:
+ subl $1,d1
+ jnc push_r_a_elements_lp
+
+/ d0 = descriptor body (pushed above); return via the saved address
+ popl d0
+ ret
+
+/ BtoAC: convert the boolean in d0b to a static string node in a0
+/ ("True" / "False")
+BtoAC:
+ testb d0b,d0b
+ jne BtoAC_true
+BtoAC_false:
+ mov $false_string,a0
+ ret
+BtoAC_true:
+ mov $true_string,a0
+ ret
+
+/ RtoAC: convert the real on the x87 stack to a Clean string node in
+/ a0, formatted with "%.15g" (or @convert_real_to_string if NOCLIB)
+RtoAC:
+#ifndef USE_CLIB
+ push $sprintf_buffer
+#endif
+/ pass the real on the C stack; then free the whole x87 stack
+ subl $8,sp
+ fstl 0(sp)
+
+ ffree %st(0)
+ ffree %st(1)
+ ffree %st(2)
+ ffree %st(3)
+ ffree %st(4)
+ ffree %st(5)
+ ffree %st(6)
+ ffree %st(7)
+
+#ifdef USE_CLIB
+ push $printf_real_string
+ push $sprintf_buffer
+ call @sprintf
+ add $16,sp
+#else
+ call @convert_real_to_string
+ add $12,sp
+#endif
+ jmp return_sprintf_buffer
+
+/ ItoAC: convert the integer in d0 to a Clean string node in a0
+ItoAC:
+#ifdef MY_ITOS
+ mov $sprintf_buffer,a0
+ call int_to_string
+
+/ d0 = string length (end pointer minus buffer start)
+ movl a0,d0
+ subl $sprintf_buffer,d0
+
+ jmp sprintf_buffer_to_string
+
+# ifdef NOCLIB
+/ C-callable wrapper: convert_int_to_string(buffer,value),
+/ returns the end-of-string pointer
+ .globl @convert_int_to_string
+@convert_int_to_string:
+ push a0
+ push a1
+ push a2
+ push d1
+ movl 16+4(sp),a0
+ movl 16+8(sp),d0
+ call int_to_string
+ movl a0,d0
+ pop d1
+ pop a2
+ pop a1
+ pop a0
+ ret
+# endif
+
+/ int_to_string: write the decimal representation of d0 into the
+/ buffer at a0, NUL terminated; returns a0 = address of the NUL.
+/ Digits are generated least-significant first into a scratch area
+/ at a0+12 and then reversed into place.  INT_MIN is handled
+/ correctly because after neg the value is processed unsigned.
+int_to_string:
+ test d0,d0
+ jns no_minus
+/ emit '-' (45) and negate
+ movb $45,(a0)
+ inc a0
+ neg d0
+no_minus:
+ lea 12(a0),a2
+
+/ ZF still from test/neg above: value 0 prints a single '0'
+ je zero_digit
+
+#ifdef USE_DIV
+ movl $10,d1
+#endif
+
+calculate_digits:
+#ifndef USE_DIV
+ cmp $10,d0
+#else
+ cmp d1,d0
+#endif
+ jb last_digit
+
+#ifndef USE_DIV
+/ divide by 10 without div: q = (d0 * 0xcccccccd) >> 35 (reciprocal
+/ multiply); remainder = d0 - 10*q computed as d0 - 8*q - 2*q
+ movl $0xcccccccd,a1
+ movl d0,d1
+
+ mull a1
+
+ movl a1,d0
+ andl $-8,a1
+ add $48,d1
+
+ shrl $3,d0
+ subl a1,d1
+ shrl $2,a1
+
+ subl a1,d1
+ movb d1b,(a2)
+#else
+ xorl a1,a1
+ div d1
+ add $48,a1
+ movb a1b,(a2)
+#endif
+ inc a2
+ jmp calculate_digits
+
+last_digit:
+ test d0,d0
+ je no_zero
+zero_digit:
+ add $48,d0
+ movb d0b,(a2)
+ inc a2
+no_zero:
+/ reverse the digits from the scratch area into the buffer proper
+ lea 12(a0),a1
+
+reverse_digits:
+ movb -1(a2),d1b
+ dec a2
+ movb d1b,(a0)
+ inc a0
+ cmp a2,a1
+ jne reverse_digits
+
+ movb $0,(a0)
+ ret
+#else
+ push d0
+ push $printf_int_string
+ push $sprintf_buffer
+ call @sprintf
+ add $12,sp
+#endif
+
+/ return_sprintf_buffer: turn the NUL-terminated C string in
+/ sprintf_buffer into a Clean string node (returned in a0)
+return_sprintf_buffer:
+#ifdef USE_CLIB
+ push $sprintf_buffer
+ call @strlen
+ add $4,sp
+#else
+/ inline strlen: d0 = length of sprintf_buffer
+ mov $sprintf_buffer-1,d0
+skip_characters:
+ inc d0
+ cmpb $0,(d0)
+ jne skip_characters
+
+ sub $sprintf_buffer,d0
+#endif
+
+#ifdef MY_ITOS
+sprintf_buffer_to_string:
+ mov $sprintf_buffer,a0
+/ build_string: allocate a string node for the d0 bytes at a0:
+/ __STRING__ descriptor, length word, ceil(d0/4) data words
+build_string:
+#endif
+ lea 3(d0),d1
+ shr $2,d1
+ add $2,d1
+
+/ garbage collect first if the node might not fit
+ movl free_heap_offset(a4),a2
+ lea -32(a2,d1,4),a2
+ cmpl end_heap_offset(a4),a2
+ jb D_to_S_no_gc
+
+ push a0
+ call collect_0l
+ pop a0
+
+D_to_S_no_gc:
+ movl free_heap_offset(a4),a2
+ sub $2,d1
+ mov a2,a1
+ movl $__STRING__+2,(a2)
+ mov d0,4(a2)
+ add $8,a2
+ jmp D_to_S_cp_str_2
+
+/ copy the character data a word at a time
+D_to_S_cp_str_1:
+ mov (a0),d0
+ add $4,a0
+ mov d0,(a2)
+ add $4,a2
+D_to_S_cp_str_2:
+ sub $1,d1
+ jnc D_to_S_cp_str_1
+
+ movl a2,free_heap_offset(a4)
+ movl a1,a0
+ ret
+
+/ eqD: compare the nodes a0 and a1 by descriptor; for the basic
+/ types also compare the stored value.  Result in d0: 1 = equal.
+eqD: mov (a0),d0
+ cmp (a1),d0
+ jne eqD_false
+
+ cmp $INT+2,d0
+ je eqD_INT
+ cmp $CHAR+2,d0
+ je eqD_CHAR
+ cmp $BOOL+2,d0
+ je eqD_BOOL
+ cmp $REAL+2,d0
+ je eqD_REAL
+
+/ same non-basic descriptor: considered equal
+ mov $1,d0
+ ret
+
+eqD_CHAR:
+eqD_INT: mov 4(a0),d1
+ xorl d0,d0
+ cmp 4(a1),d1
+ sete %al
+ ret
+
+eqD_BOOL: movb 4(a0),d1b
+ xorl d0,d0
+ cmpb 4(a1),d1b
+ sete d0b
+ ret
+
+eqD_REAL:
+/ x87 compare: keep C3|C2 (0x44) of the high status byte; only
+/ "equal" gives C3=1,C2=0, so the xor leaves 0 exactly then.
+/ Unordered (NaN) keeps C2 set and compares unequal.
+ fldl 4(a0)
+ fcompl 4(a1)
+ fnstsw %ax
+ andb $68,%ah
+ xorb $64,%ah
+ sete %al
+ andl $1,%eax
+ ret
+
+eqD_false:
+ xorl d0,d0
+ ret
+/
+/ the timer
+/
+
+#if !defined (OS2) && !defined (_WINDOWS_) && !defined (ELF)
+/ unix variant: times are (seconds,microseconds) pairs taken from
+/ getrusage(RUSAGE_SELF).ru_utime (the first two words of the
+/ 88-byte struct rusage allocated on the stack)
+init_timer:
+ sub $88,sp
+ push sp
+ push $0
+ call @getrusage
+ add $8,sp
+
+ mov (sp),d0
+ mov 4(sp),d1
+ mov d0,last_time
+ mov d1,last_time+4
+ xorl d0,d0
+ mov d0,execute_time
+ mov d0,execute_time+4
+ mov d0,garbage_collect_time
+ mov d0,garbage_collect_time+4
+ mov d0,IO_time
+ mov d0,IO_time+4
+ add $88,sp
+ ret
+
+/ get_time_diff: d0:d1 = (sec,usec) elapsed since the previous call;
+/ also updates last_time
+get_time_diff:
+ sub $88,sp
+ push sp
+ push $0
+ call @getrusage
+ add $8,sp
+
+ mov (sp),d0
+ mov 4(sp),d1
+
+ mov $last_time,a0
+ mov (a0),a1
+ mov d0,(a0)
+ sub a1,d0
+
+ mov 4(a0),a1
+ mov d1,4(a0)
+
+/ borrow from the seconds on microsecond underflow
+ sub a1,d1
+ jae get_time_diff_1
+ add $1000000,d1
+ dec d0
+get_time_diff_1:
+ add $88,sp
+ ret
+
+/ add the elapsed time slice to one of the accumulators, carrying
+/ microseconds into seconds
+add_execute_time:
+ push d1
+
+ call get_time_diff
+
+ mov $execute_time,a0
+
+add_time:
+ add (a0),d0
+ add 4(a0),d1
+ cmp $1000000,d1
+ jb add_execute_time_1
+ sub $1000000,d1
+ inc d0
+add_execute_time_1:
+ mov d0,(a0)
+ mov d1,4(a0)
+ pop d1
+ ret
+
+add_garbage_collect_time:
+ push d1
+ call get_time_diff
+
+ mov $garbage_collect_time,a0
+ jmp add_time
+
+add_IO_time:
+ push d1
+ call get_time_diff
+
+ mov $IO_time,a0
+ jmp add_time
+#else
+
+/ OS2/Windows/ELF variant: times are single millisecond counts
+/ (Windows: GetTickCount; ELF: times() clock ticks scaled by 10 --
+/ NOTE(review): assumes 100 ticks/second; OS2: DosQuerySysInfo
+/ item 14 = millisecond counter)
+init_timer:
+#ifdef _WINDOWS_
+ call _GetTickCount?0
+#else
+# ifdef ELF
+ subl $20,sp
+ push sp
+ call times
+ addl $4,sp
+ movl (sp),d0
+ imul $10,d0
+ addl $20,sp
+# else
+ subl $4,sp
+ pushl $4
+ lea 4(sp),a0
+ pushl a0
+ pushl $14
+ pushl $14
+ call _DosQuerySysInfo
+ addl $16,sp
+ popl d0
+# endif
+#endif
+ mov d0,last_time
+ xorl d0,d0
+ mov d0,execute_time
+ mov d0,garbage_collect_time
+ mov d0,IO_time
+#ifdef MEASURE_GC
+ mov d0,mark_compact_garbage_collect_time
+ mov d0,compact_garbage_collect_time
+#endif
+ ret
+
+/ get_time_diff: d0 = milliseconds elapsed since the previous call;
+/ also updates last_time
+get_time_diff:
+#ifdef _WINDOWS_
+ call _GetTickCount?0
+#else
+# ifdef ELF
+ subl $20,sp
+ push sp
+ call times
+ addl $4,sp
+ movl (sp),d0
+ imul $10,d0
+ addl $20,sp
+# else
+ subl $4,sp
+ pushl $4
+ lea 4(sp),a0
+ pushl a0
+ pushl $14
+ pushl $14
+ call _DosQuerySysInfo
+ addl $16,sp
+ popl d0
+# endif
+#endif
+ mov $last_time,a0
+ mov (a0),a1
+ mov d0,(a0)
+ sub a1,d0
+ ret
+
+add_execute_time:
+ call get_time_diff
+ mov $execute_time,a0
+
+add_time:
+ add (a0),d0
+ mov d0,(a0)
+ ret
+
+add_garbage_collect_time:
+ call get_time_diff
+ mov $garbage_collect_time,a0
+ jmp add_time
+
+add_IO_time:
+ call get_time_diff
+ mov $IO_time,a0
+ jmp add_time
+
+# ifdef MEASURE_GC
+add_mark_compact_garbage_collect_time:
+ call get_time_diff
+ mov $mark_compact_garbage_collect_time,a0
+ jmp add_time
+
+add_compact_garbage_collect_time:
+ call get_time_diff
+ mov $compact_garbage_collect_time,a0
+ jmp add_time
+# endif
+#endif
+
+/
+/ the garbage collector
+/
+
+/ collect_2l/_1l and collect_2/_1: garbage-collect while preserving
+/ one or two node pointers (a0, a1) by spilling them to the A-stack
+/ for the duration.  The "l" variants expect a2 = heap pointer
+/ including the pending allocation; the plain variants use the
+/ current free_heap pointer.
+collect_2l:
+#ifdef PROFILE
+ pushl a2
+ movl $garbage_collector_name,a2
+ call profile_s
+ popl a2
+#endif
+ mov a0,(a3)
+ mov a1,4(a3)
+ add $8,a3
+ call collect_0l_
+ mov -4(a3),a1
+ mov -8(a3),a0
+ sub $8,a3
+#ifdef PROFILE
+ jmp profile_r
+#else
+ ret
+#endif
+
+collect_1l:
+#ifdef PROFILE
+ pushl a2
+ movl $garbage_collector_name,a2
+ call profile_s
+ popl a2
+#endif
+ mov a0,(a3)
+ add $4,a3
+ call collect_0l_
+ mov -4(a3),a0
+ sub $4,a3
+#ifdef PROFILE
+ jmp profile_r
+#else
+ ret
+#endif
+
+collect_2:
+#ifdef PROFILE
+ movl $garbage_collector_name,a2
+ call profile_s
+#endif
+ mov a0,(a3)
+ mov a1,4(a3)
+ add $8,a3
+ call collect_0_
+ mov -4(a3),a1
+ mov -8(a3),a0
+ sub $8,a3
+#ifdef PROFILE
+ jmp profile_r
+#else
+ ret
+#endif
+
+collect_1:
+#ifdef PROFILE
+ movl $garbage_collector_name,a2
+ call profile_s
+#endif
+ mov a0,(a3)
+ add $4,a3
+ call collect_0_
+ mov -4(a3),a0
+ sub $4,a3
+#ifdef PROFILE
+ jmp profile_r
+#else
+ ret
+#endif
+
+#ifdef PROFILE
+collect_0:
+ movl $garbage_collector_name,a2
+ call profile_s
+ call collect_0_
+ jmp profile_r
+collect_0l:
+ pushl a2
+ movl $garbage_collector_name,a2
+ call profile_s
+ popl a2
+ call collect_0l_
+ jmp profile_r
+#endif
+
+#ifndef PROFILE
+collect_0:
+#endif
+collect_0_:
+ movl free_heap_offset(a4),a2
+#ifndef PROFILE
+collect_0l:
+#endif
+collect_0l_:
+ push d0
+ push d1
+
+/ n_allocated_words = (a2 + 32 - free_heap) / 4
+ add $32,a2
+ sub free_heap_offset(a4),a2
+ shr $2,a2
+ mov a2,n_allocated_words_offset(a4)
+
+#ifdef MARK_AND_COPY_GC
+/ flag bit 64: copying collector selected, skip the mark-bit scan
+ testb $64,@flags
+ je no_mark3
+#endif
+
+#ifdef MARK_GC
+/ Before collecting, scan the remainder of the mark bit vector (left
+/ by the previous mark phase) for a run of zero bit words that
+/ describes a free gap large enough for the request; if found, move
+/ allocation into that gap instead of collecting.
+/ a2 = bit words left to scan, a0 = scan pointer, a3 = words needed,
+/ d1 = 0 (used to wipe scanned words).
+ movl bit_counter_offset(a4),a2
+ testl a2,a2
+ je no_scan
+
+ xorl d1,d1
+ pushl a3
+
+ movl n_allocated_words_offset(a4),a3
+ movl bit_vector_p_offset(a4),a0
+
+scan_bits:
+ cmpl (a0),d1
+ je zero_bits
+/ non-zero bit word: clear it (its objects survive in place)
+ movl d1,(a0)
+ addl $4,a0
+ subl $1,a2
+ jne scan_bits
+
+ jmp end_scan
+
+zero_bits:
+/ a1 marks the start of a run of zero bit words
+ lea 4(a0),a1
+ addl $4,a0
+ subl $1,a2
+ jne skip_zero_bits_lp1
+ jmp end_bits
+
+skip_zero_bits_lp:
+ testl d0,d0
+ jne end_zero_bits
+skip_zero_bits_lp1:
+ movl (a0),d0
+ addl $4,a0
+ subl $1,a2
+ jne skip_zero_bits_lp
+
+ testl d0,d0
+ je end_bits
+ movl d1,-4(a0)
+ movl a0,d0
+ subl a1,d0
+ jmp end_bits2
+
+end_zero_bits:
+/ d0 = length of the zero run in bytes; *8 = free heap words
+/ (each bit-vector byte covers 8 heap words)
+ movl a0,d0
+ subl a1,d0
+ shll $3,d0
+ addl d0,n_free_words_after_mark_offset(a4)
+ movl d1,-4(a0)
+
+ cmpl a3,d0
+ jb scan_bits
+
+found_free_memory:
+/ the gap is big enough: resume allocation inside it.  Convert the
+/ bit-word range back to heap addresses (each bit word covers 32
+/ heap words, hence the shift by 5 on the byte offset).
+ movl a2,bit_counter_offset(a4)
+ movl a0,bit_vector_p_offset(a4)
+
+ lea -4(a1),a2
+ subl heap_vector_offset(a4),a2
+ shll $5,a2
+ movl heap_p3_offset(a4),d1
+ addl d1,a2
+ movl a2,free_heap_offset(a4)
+
+ lea (a2,d0,4),d1
+ movl d1,heap_end_after_gc_offset(a4)
+ subl $32,d1
+ movl d1,end_heap_offset(a4)
+
+ popl a3
+ popl d1
+ popl d0
+ ret
+
+end_bits:
+/ the zero run extends to the end of the vector
+ movl a0,d0
+ subl a1,d0
+ addl $4,d0
+end_bits2:
+ shll $3,d0
+ addl d0,n_free_words_after_mark_offset(a4)
+ cmpl a3,d0
+ jae found_free_memory
+
+end_scan:
+ popl a3
+ movl a2,bit_counter_offset(a4)
+
+no_scan:
+#endif
+
+#ifdef MARK_AND_COPY_GC
+no_mark3:
+#endif
+
+/ garbage_collect_flag > 0: an extra heap block is still available;
+/ if it is large enough, switch allocation to it instead of
+/ collecting.  NOTE(review): the flag is decremented by 2 here --
+/ confirm the exact counting scheme against icompact.s/icopy.s.
+ movsbl garbage_collect_flag_offset(a4),d0
+ test d0,d0
+ jle collect
+
+ subl $2,d0
+ movb d0b,garbage_collect_flag_offset(a4)
+
+ movl extra_heap_size_offset(a4),d1
+ cmpl d1,a2
+ ja collect
+
+ movl extra_heap_offset(a4),a2
+ movl a2,free_heap_offset(a4)
+ lea (a2,d1,4),d1
+ movl d1,heap_end_after_gc_offset(a4)
+ subl $32,d1
+ movl d1,end_heap_offset(a4)
+
+ pop d1
+ pop d0
+ ret
+
/ a real garbage collection is needed
collect:
	call add_execute_time

/ flags bit 2: print stack sizes before collecting
	testl $4,@flags
	je no_print_stack_sizes

	push $garbage_collect_string_1
	call @ew_print_string
	add $4,sp

	mov a3,d0
	sub stack_p_offset(a4),d0
	push d0
	call @ew_print_int
	add $4,sp

	push $garbage_collect_string_2
	call @ew_print_string
	add $4,sp

	mov halt_sp_offset(a4),d0
	sub sp,d0
	push d0
	call @ew_print_int
	add $4,sp

	push $garbage_collect_string_3
	call @ew_print_string
	add $4,sp

no_print_stack_sizes:
/ A-stack overflow check: a3 beyond stack base + a_stack_size is fatal
	mov stack_p_offset(a4),d0
	add a_stack_size_offset(a4),d0
	cmp d0,a3
	ja stack_overflow

#ifdef MARK_AND_COPY_GC
/ flags bit 6 selects the mark&compact collector at runtime
	testb $64,@flags
	jne compacting_collector
#else
# ifdef MARK_GC
	jmp compacting_collector
# endif
#endif

#if defined (MARK_AND_COPY_GC) || !defined (MARK_GC)
/ copying collector. First clear the copied-node bit vector, but only the
/ part that could have been dirtied since the previous copy GC.
	cmpb $0,garbage_collect_flag_offset(a4)
	jne compacting_collector

	mov heap_copied_vector_offset(a4),a2

	cmpl $0,heap_end_after_copy_gc_offset(a4)
	je zero_all

	movl free_heap_offset(a4),d0
	subl heap_p1_offset(a4),d0
	addl $63*4,d0
	shr $8,d0
	call zero_bit_vector

	movl heap_end_after_copy_gc_offset(a4),a1
	subl heap_p1_offset(a4),a1
	shr $6,a1
	andl $-4,a1

	movl heap_copied_vector_offset(a4),a2
	movl heap_copied_vector_size_offset(a4),d0
	addl a1,a2
	subl a1,d0
	shr $2,d0

	movl $0,heap_end_after_copy_gc_offset(a4)

	call zero_bit_vector
	jmp end_zero_bit_vector

zero_all:
	mov heap_copied_vector_size_offset(a4),d0
	shr $2,d0
	call zero_bit_vector

end_zero_bit_vector:

/ the actual copying collector body
#include "icopy.s"

#ifdef WRITE_HEAP
	movl a3,heap2_begin_and_end
#endif

/ a2 = number of words copied (a3 - a2 on return from icopy, in words)
	neg a2
	add a3,a2
	shr $2,a2

#ifdef MEASURE_GC
	addl a2,total_gc_bytes_lo
	jnc no_total_gc_bytes_carry1
	incl total_gc_bytes_hi
no_total_gc_bytes_carry1:
#endif

	pop a3

	call add_garbage_collect_time

/ if the live data (a2) plus the pending request does not fit, or the heap
/ is too full (live*160 >= heap_size*7*4, i.e. occupancy check below),
/ switch to the mark-scan collector
	subl n_allocated_words_offset(a4),a2
	jc switch_to_mark_scan

	lea (a2,a2,4),d0
	shl $5,d0
	movl heap_size_offset(a4),d1
	mov d1,a0
	shl $2,d1
	add a0,d1
	add d1,d1
	add a0,d1
	cmp d1,d0
	jnc no_mark_scan
/ jmp no_mark_scan

/ reconfigure the heap layout for the mark-scan collector: heap_p3 is the
/ heap proper, followed by the mark bit vector; the remainder becomes the
/ extra (reserve) heap
switch_to_mark_scan:
	movl heap_size_33_offset(a4),d0
	shl $5,d0
	movl heap_p_offset(a4),d1

	movl heap_p1_offset(a4),a0
	cmpl heap_p2_offset(a4),a0
	jc vector_at_begin

vector_at_end:
	movl d1,heap_p3_offset(a4)
	add d0,d1
	movl d1,heap_vector_offset(a4)

	movl heap_p1_offset(a4),d0
	movl d0,extra_heap_offset(a4)
	subl d0,d1
	shr $2,d1
	movl d1,extra_heap_size_offset(a4)
	jmp switch_to_mark_scan_2

vector_at_begin:
	movl d1,heap_vector_offset(a4)
	addl heap_size_offset(a4),d1
	subl d0,d1
	movl d1,heap_p3_offset(a4)

	movl d1,extra_heap_offset(a4)
	movl heap_p2_offset(a4),a0
	subl d1,a0
	shr $2,a0
	movl a0,extra_heap_size_offset(a4)

switch_to_mark_scan_2:
	movl heap_size_offset(a4),d0
	shr $3,d0
	sub a2,d0
	shl $2,d0

	movb $1,garbage_collect_flag_offset(a4)

	test a2,a2
	jns end_garbage_collect

/ not even the semi-space suffices: allocate from the extra heap now,
/ flag = -1 so the next collection goes straight to the compactor
	movb $-1,garbage_collect_flag_offset(a4)

	movl extra_heap_size_offset(a4),d1
	movl d1,d0
	subl n_allocated_words_offset(a4),d0
	js out_of_memory_4

	movl extra_heap_offset(a4),a2
	shl $2,d1
	movl a2,free_heap_offset(a4)
	addl a2,d1
	movl d1,heap_end_after_gc_offset(a4)
#ifdef WRITE_HEAP
	movl a2,heap_end_write_heap
#endif
	subl $32,d1
	movl d1,end_heap_offset(a4)
#ifdef WRITE_HEAP
	movl $1,d3_flag_write_heap
	jmp end_garbage_collect_
#else
	jmp end_garbage_collect
#endif
no_mark_scan:
/ exchange the semi_spaces
	mov heap_p1_offset(a4),d0
	mov heap_p2_offset(a4),d1
	mov d0,heap_p2_offset(a4)
	mov d1,heap_p1_offset(a4)

	mov heap_size_129_offset(a4),d0
	shl $6-2,d0

# ifdef MUNMAP
/ return the now-unused semi-space's whole pages to the OS
	mov heap_p2_offset(a4),d1
	lea (d1,d0,4),a0
	add $4095,d1
	andl $-4096,d1
	andl $-4096,a0
	sub d1,a0
	jbe no_pages
	push d0

	push a0
	push d1
	call _munmap
	add $8,sp

	pop d0
no_pages:
# endif

# ifdef ADJUST_HEAP_SIZE
	movl d0,d1
# endif
	sub a2,d0

# ifdef ADJUST_HEAP_SIZE
/ shrink the usable heap toward live_size * heap_size_multiple / 512,
/ bounded below by MINIMUM_HEAP_SIZE_2
	movl d0,a0
	imull @heap_size_multiple
	shrd $9,a1,d0
	shr $9,a1
	jne no_small_heap1

	cmpl $(MINIMUM_HEAP_SIZE_2),d0
	jae not_too_small1
	movl $(MINIMUM_HEAP_SIZE_2),d0
not_too_small1:
	subl d0,d1
	jb no_small_heap1

	shl $2,d1
	movl heap_end_after_gc_offset(a4),a2
	subl d1,end_heap_offset(a4)
	movl a2,heap_end_after_copy_gc_offset(a4)
	subl d1,a2
	movl a2,heap_end_after_gc_offset(a4)

no_small_heap1:
	movl a0,d0
# endif

	shl $2,d0
#endif

/ common exit: d0 = free bytes after this collection
end_garbage_collect:
#ifdef WRITE_HEAP
	movl a4,heap_end_write_heap
	movl $0,d3_flag_write_heap
end_garbage_collect_:
#endif

	pushl d0

/ flags bit 1: report heap use after each collection
	testl $2,@flags
	je no_heap_use_message

	pushl d0

	push $heap_use_after_gc_string_1
	call @ew_print_string
	add $4,sp

	call @ew_print_int
	add $4,sp

	push $heap_use_after_gc_string_2
	call @ew_print_string
	add $4,sp

no_heap_use_message:

#ifdef FINALIZERS
	call call_finalizers
#endif

	popl d0

#ifdef WRITE_HEAP
	/* Check whether memory profiling is on or off */
	testb $32,@flags
	je no_write_heap

	cmpl @min_write_heap_size,d0
	jb no_write_heap

	pushl a0
	pushl a1
	pushl a2
	pushl a3
	pushl a4

/ build the 64-byte record passed to write_heap on the C stack
	subl $64,sp

	movl d3_flag_write_heap,d0
	test d0,d0
	jne copy_to_compact_with_alloc_in_extra_heap

	movsbl garbage_collect_flag_offset(a4),d0

	movl heap2_begin_and_end,a0
	movl heap2_begin_and_end+4,a1

	lea heap_p1_offset(a4),d1

	testl d0,d0
	je gc0

	lea heap_p2_offset(a4),d1
	jg gc1

	lea heap_p3_offset(a4),d1
	xor a0,a0
	xor a1,a1

gc0:
gc1:
	movl (d1),d1

	/* fill record */

	movl sp,d0

	movl d1,0(d0)
	movl a4,4(d0) // TODO: is this right? (original note: "klop dit?")

	movl a0,8(d0) // heap2_begin
	movl a1,12(d0) // heap2_end

	movl stack_p_offset(a4),d1
	movl d1,16(d0) // stack_begin

	movl a3,20(d0) // stack_end
	movl $0,24(d0) // text_begin
	movl $0,28(d0) // data_begin

	movl $small_integers,32(d0) // small_integers
	movl $static_characters,36(d0) // small_characters

	movl $INT+2,40(d0) // INT-descP
	movl $CHAR+2,44(d0) // CHAR-descP
	movl $REAL+2,48(d0) // REAL-descP
	movl $BOOL+2,52(d0) // BOOL-descP
	movl $__STRING__+2,56(d0) // STRING-descP
	movl $__ARRAY__+2,60(d0) // ARRAY-descP

	pushl d0
	call @write_heap

	addl $68,sp

	popl a4
	popl a3
	popl a2
	popl a1
	popl a0
no_write_heap:

#endif

	movl free_heap_offset(a4),a2

	pop d1
	pop d0
	ret
+
#ifdef FINALIZERS
/ run every finalizer on free_finalizer_list (nodes: 4(n)=next link pushed,
/ 8(n)=finalizer record with function ptr at (r) and argument at 4(r)),
/ then reset the list to the empty marker __Nil-4
call_finalizers:
	movl free_finalizer_list,d0

call_finalizers_lp:
	cmpl $__Nil-4,d0
	je end_call_finalizers
	pushl 4(d0)
	movl 8(d0),d1
	pushl 4(d1)
	call *(d1)
	addl $4,sp
	pop d0
	jmp call_finalizers_lp
end_call_finalizers:

	movl $__Nil-4,free_finalizer_list
	ret
#endif

#ifdef WRITE_HEAP
/ variant of the write_heap record fill used when allocation happened in
/ the extra heap; joins the common path at gc1
copy_to_compact_with_alloc_in_extra_heap:
	movl heap2_begin_and_end,a0
	movl heap2_begin_and_end+4,a1
	lea heap_p2_offset(a4),d1
	jmp gc1
#endif

out_of_memory_4:
	call add_garbage_collect_time

	mov $out_of_memory_string_4,a2
	jmp print_error

/ zero d0 32-bit words starting at a2 (clobbers a1, a2, d0, d1);
/ unrolled 4x, with prologues for the d0 mod 4 remainder
zero_bit_vector:
	xorl a1,a1
	testb $1,d0b
	je zero_bits1_1
	mov a1,(a2)
	add $4,a2
zero_bits1_1:
	shr $1,d0

	mov d0,d1
	shr $1,d0
	testb $1,d1b
	je zero_bits1_5

	sub $8,a2
	jmp zero_bits1_2

zero_bits1_4:
	mov a1,(a2)
	mov a1,4(a2)
zero_bits1_2:
	mov a1,8(a2)
	mov a1,12(a2)
	add $16,a2
zero_bits1_5:
	sub $1,d0
	jae zero_bits1_4
	ret
+
/ reorder: interleave two regions by swapping words from the front (a0,
/ advancing) with words from the back (a1, descending) until the pointers
/ meet. d0/d1 are per-element word counters reloaded from their saved
/ originals on the stack; 8(sp)/12(sp) hold the element strides in bytes.
/ NOTE(review): caller contract (a0/a1/d0/d1 meaning) is inferred from the
/ code only — confirm against the callers in icompact.s / icopy.s.
reorder:
	pushl a3
	pushl a2

	movl d0,a2
	shl $2,a2
	movl d1,a3
	shl $2,a3
	addl a3,a0
	subl a2,a1

/ stack layout below: (sp)=d0 orig, 4(sp)=d1 orig, 8(sp)=d0*4, 12(sp)=d1*4
	pushl a3
	pushl a2
	pushl d1
	pushl d0
	jmp st_reorder_lp

reorder_lp:
	movl (a0),a2
	movl -4(a1),a3
	movl a2,-4(a1)
	subl $4,a1
	movl a3,(a0)
	addl $4,a0

	dec d0
	jne next_b_in_element
	movl (sp),d0
	addl 12(sp),a0
next_b_in_element:
	dec d1
	jne next_a_in_element
	movl 4(sp),d1
	subl 8(sp),a1
next_a_in_element:
st_reorder_lp:
	cmpl a0,a1
	ja reorder_lp

	popl d0
	popl d1
	addl $8,sp
	popl a2
	popl a3
	ret
+
+/
+/ the sliding compacting garbage collector
+/
+
+compacting_collector:
+/ zero all mark bits
+
+ movl heap_p3_offset(a4),d0
+ negl d0
+ movl d0,neg_heap_p3_offset(a4)
+
+ movl a3,stack_top_offset(a4)
+
+#ifdef MARK_GC
+# ifdef MARK_AND_COPY_GC
+ testb $64,@flags
+ je no_mark4
+# endif
+ cmpl $0,zero_bits_before_mark_offset(a4)
+ je no_zero_bits
+
+ movl $0,zero_bits_before_mark_offset(a4)
+
+# ifdef MARK_AND_COPY_GC
+no_mark4:
+# endif
+#endif
+
+ movl heap_vector_offset(a4),a2
+ movl heap_size_33_offset(a4),d0
+ addl $3,d0
+ shr $2,d0
+
+ xorl d1,d1
+
+ testb $1,d0b
+ je zero_bits_1
+ movl d1,(a2)
+ addl $4,a2
+zero_bits_1:
+ movl d0,a0
+ shr $2,d0
+
+ testb $2,a0b
+ je zero_bits_5
+
+ subl $8,a2
+ jmp zero_bits_2
+
+zero_bits_4:
+ movl d1,(a2)
+ movl d1,4(a2)
+zero_bits_2:
+ movl d1,8(a2)
+ movl d1,12(a2)
+ addl $16,a2
+zero_bits_5:
+ subl $1,d0
+ jnc zero_bits_4
+
+#ifdef MARK_GC
+# ifdef MARK_AND_COPY_GC
+ testb $64,@flags
+ je no_mark5
+# endif
+no_zero_bits:
+ movl n_last_heap_free_bytes_offset(a4),d0
+ movl n_free_words_after_mark_offset(a4),d1
+
+#if 1
+ shrl $2,d0
+#else
+ shll $2,d1
+#endif
+
+ movl d1,a2
+ shll $3,a2
+ addl d1,a2
+ shrl $2,a2
+
+ cmpl a2,d0
+ jg compact_gc
+
+# ifdef ADJUST_HEAP_SIZE
+ movl bit_vector_size_offset(a4),d1
+ shl $2,d1
+
+ subl d1,d0
+ negl d0
+
+ imull @heap_size_multiple
+ shrd $7,a1,d0
+ shr $7,a1
+ jne no_smaller_heap
+
+ cmpl d1,d0
+ jae no_smaller_heap
+
+ cmpl $(MINIMUM_HEAP_SIZE),d1
+ jbe no_smaller_heap
+
+ jmp compact_gc
+no_smaller_heap:
+# endif
+
+#include "imark.s"
+
+compact_gc:
+ movl $1,zero_bits_before_mark_offset(a4)
+ movl $0,n_last_heap_free_bytes_offset(a4)
+ movl $1000,n_free_words_after_mark_offset(a4)
+# ifdef MARK_AND_COPY_GC
+no_mark5:
+# endif
+#endif
+
+#include "icompact.s"
+
+ movl stack_top_offset(a4),a3
+
+ movl heap_size_33_offset(a4),d1
+ shl $5,d1
+ addl heap_p3_offset(a4),d1
+
+ movl a2,free_heap_offset(a4)
+ movl d1,heap_end_after_gc_offset(a4)
+ lea -32(d1),d0
+ movl d0,end_heap_offset(a4)
+
+ subl a2,d1
+ shr $2,d1
+
+ subl n_allocated_words_offset(a4),d1
+ jc out_of_memory_4
+
+ cmpl $107374182,d1
+ jae not_out_of_memory
+ movl d1,d0
+ shl $2,d0
+ addl d1,d0
+ shl $3,d0
+ cmpl heap_size_offset(a4),d0
+ jc out_of_memory_4
+not_out_of_memory:
+
+#if defined (MARK_GC) || defined (COMPACT_GC_ONLY)
+# if defined (MARK_GC) && defined (ADJUST_HEAP_SIZE)
+# ifdef MARK_AND_COPY_GC
+ testb $64,@flags
+ je no_mark_6
+# endif
+
+ movl neg_heap_p3_offset(a4),d0
+ addl a2,d0
+ movl n_allocated_words_offset(a4),d1
+ lea (d0,d1,4),d0
+
+ movl heap_size_33_offset(a4),d1
+ shl $5,d1
+
+ imull @heap_size_multiple
+ shrd $8,a1,d0
+ shr $8,a1
+ jne no_small_heap2
+
+ andl $-4,d0
+
+ cmpl $(MINIMUM_HEAP_SIZE),d0
+ jae not_too_small2
+ movl $(MINIMUM_HEAP_SIZE),d0
+not_too_small2:
+ movl d1,a0
+ subl d0,a0
+ jb no_small_heap2
+
+ subl a0,heap_end_after_gc_offset(a4)
+ subl a0,end_heap_offset(a4)
+
+ movl d0,d1
+
+no_small_heap2:
+ shr $2,d1
+ movl d1,bit_vector_size_offset(a4)
+
+# ifdef MARK_AND_COPY_GC
+no_mark_6:
+# endif
+# endif
+ jmp no_copy_garbage_collection
+#else
+ shl $2,d0
+ movl heap_size_offset(a4),a0
+ shl $5,a0
+ subl heap_size_offset(a4),a0
+ cmpl a0,d0
+ jle no_copy_garbage_collection
+
+ movl heap_p_offset(a4),d0
+ movl d0,heap_p1_offset(a4)
+
+ movl heap_size_129_offset(a4),d1
+ shl $6,d1
+ addl d1,d0
+ movl d0,heap_copied_vector_offset(a4)
+ movl d0,heap_end_after_gc_offset(a4)
+ lea -32(d0),d1
+ movl d1,end_heap_offset(a4)
+ movl heap_copied_vector_size_offset(a4),d1
+ addl d0,d1
+ movl d1,heap_p2_offset(a4)
+
+ movl heap_p3_offset(a4),d0
+ cmpl heap_vector_offset(a4),d0
+ jle vector_at_end_2
+
+ movl heap_vector_offset(a4),d1
+ movl d1,extra_heap_offset(a4)
+ subl d1,d0
+ shr $2,d0
+ movl d0,extra_heap_size_offset(a4)
+
+ movb $2,garbage_collect_flag_offset(a4)
+ jmp no_copy_garbage_collection
+
+vector_at_end_2:
+ movb $0,garbage_collect_flag_offset(a4)
+#endif
+
+no_copy_garbage_collection:
+#ifdef MEASURE_GC
+ call add_compact_garbage_collect_time
+
+ movl free_heap_offset(a4),d0
+ subl heap_p3_offset(a4),d0
+
+ addl d0,total_compact_gc_bytes_lo
+ jnc no_total_compact_gc_bytes_carry
+ incl total_compact_gc_bytes_hi
+no_total_compact_gc_bytes_carry:
+#else
+ call add_garbage_collect_time
+#endif
+
+ movl free_heap_offset(a4),d0
+ subl heap_p3_offset(a4),d0
+ movl n_allocated_words_offset(a4),d1
+ lea (d0,d1,4),d0
+ jmp end_garbage_collect
+
#if defined (_WINDOWS_) && defined (STACK_OVERFLOW_EXCEPTION_HANDLER)
/ Win32 vectored/SEH handler: on a stack-overflow (or a guard-page /
/ access-violation that hits the A-stack guard page) redirect Eip
/ (CONTEXT offset 0xb8) to the runtime's stack_overflow routine
	.globl _clean_exception_handler?4
_clean_exception_handler?4:
	movl 4(%esp),%eax
	movl (%eax),%eax
	cmpl $0xc00000fd,(%eax) // EXCEPTION_STACK_OVERFLOW
	je stack_overflow_exception

	cmpl $0x80000001,(%eax) // EXCEPTION_GUARD_PAGE
	je guard_page_or_access_violation_exception

	cmpl $0xc0000005,(%eax) // EXCEPTION_ACCESS_VIOLATION
	je guard_page_or_access_violation_exception

no_stack_overflow_exception:
	movl $0,%eax // EXCEPTION_CONTINUE_SEARCH
	ret $4

guard_page_or_access_violation_exception:
/ fault address (ExceptionInformation[1] at 0x18) must lie in the guard page
	movl 0x18(%eax),%eax
	andl $-4096,%eax
	cmpl %eax,a_stack_guard_page
	jne no_stack_overflow_exception

	cmpl $0,a_stack_guard_page
	je no_stack_overflow_exception

stack_overflow_exception:
	movl 4(%esp),%eax
	movl 4(%eax),%eax
	movl $stack_overflow,0xb8(%eax)

	movl $-1,%eax // EXCEPTION_CONTINUE_EXECUTION
	ret $4
#endif

/ fatal A-stack overflow: report and halt
stack_overflow:
	call add_execute_time

	mov $stack_overflow_string,a2
	jmp print_error
+
/ fatal I/O error: the error message pointer was pushed by the caller
/ (below the discarded return address)
@IO_error:
	addl $4,sp

	pushl $IO_error_string
	call @ew_print_string
	addl $4,sp

	call @ew_print_string
	addl $4,sp

	pushl $new_line_string
	call @ew_print_string
	addl $4,sp

	jmp halt

/ print the error string in a2 and terminate the program
print_error:
	push a2
	call @ew_print_string
	add $4,sp

halt:
	mov halt_sp_offset(a4),sp

#ifdef PROFILE
	call write_profile_stack
#endif

#ifdef _WINDOWS_
# if 0
	testb $8,@flags
	jne exit
	testb $16,@flags
	je exit
	call @wait_for_key_press
# endif
#endif

#ifdef _WINDOWS_
	movl $1,@execution_aborted

	cmpl $0,dll_initisialised // [sic] external symbol name, do not "fix"
	je exit

	cmpl $0,@return_code
	jne return_code_set
	movl $-1,@return_code
return_code_set:
	pushl @return_code
	call _ExitProcess?4
	jmp return_code_set
#else
	jmp exit
#endif
+
/ evaluate the node in a1 (calling its code entry at (a1)) and copy the
/ resulting 3-word node over the node in a0; a0 is protected on the A-stack
/ during the evaluation
e__system__eaind:
__eaind:
eval_fill:
	mov a0,(a3)
	add $4,a3
	mov a1,a0
	call *(a1)
	mov a0,a1
	mov -4(a3),a0
	sub $4,a3

	mov (a1),a2
	mov a2,(a0)
	mov 4(a1),a2
	mov a2,4(a0)
	mov 8(a1),a2
	mov a2,8(a0)
	ret

/ in-line descriptor for the indirection node: code thunk + descriptor words
/ immediately preceding the e__system__nind entry
	align (2)
	movl $e__system__eaind,d0
	jmp *d0
	.space 5
	.long e__system__dind
	.long -2
e__system__nind:
/ follow an indirection: if the target (4(a0)) is already evaluated
/ (descriptor bit 1 set), copy its 3 words into a0; otherwise either
/ evaluate it first (MARK_GC) or report a cycle in the spine
__indirection:
	mov 4(a0),a1
	mov (a1),d0
	testb $2,d0b
#ifdef MARK_GC
	je eval_fill2
#else
	je __cycle__in__spine
#endif
	mov d0,(a0)
	mov 4(a1),a2
	mov a2,4(a0)
	mov 8(a1),a2
	mov a2,8(a0)
	ret

#ifdef MARK_GC
eval_fill2:
/ mark a0 as under evaluation (cycle detection), then evaluate the target
/ and copy the result back, as in eval_fill
	movl $__cycle__in__spine,(a0)
	movl a0,(a3)
# ifdef MARK_AND_COPY_GC
	testb $64,@flags
	je __cycle__in__spine
# endif
	addl $4,a3
	movl a1,a0
	call *d0
	movl a0,a1
	movl -4(a3),a0
	subl $4,a3

	mov (a1),a2
	mov a2,(a0)
	mov 4(a1),a2
	mov a2,4(a0)
	mov 8(a1),a2
	mov a2,8(a0)
	ret
#endif
+
/ eval_upd_N: overwrite the node in a1 with an indirection to a0 and jump
/ to the code in a2, pushing the N node arguments of a1 onto the A-stack
/ (reversed) with a0 first, leaving the first argument in a0 and a0's old
/ value reachable via the indirection. Each entry is preceded by a small
/ PROFILE stub that is fallen into via "call profile_n".
#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_0:
	movl $__indirection,(a1)
	mov a0,4(a1)
	jmp *a2

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_1:
	movl $__indirection,(a1)
	mov 4(a1),d0
	mov a0,4(a1)
	mov d0,a1
	jmp *a2

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_2:
	movl $__indirection,(a1)
	mov 4(a1),d0
	mov a0,4(a1)
	mov a0,(a3)
	add $4,a3
	mov 8(a1),a0
	mov d0,a1
	jmp *a2

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_3:
	movl $__indirection,(a1)
	mov 4(a1),d0
	mov a0,4(a1)
	mov a0,(a3)
	mov 12(a1),d1
	mov d1,4(a3)
	add $8,a3
	mov 8(a1),a0
	mov d0,a1
	jmp *a2

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_4:
	movl $__indirection,(a1)
	mov 4(a1),d0
	mov a0,4(a1)
	mov a0,(a3)
	mov 16(a1),d1
	mov d1,4(a3)
	mov 12(a1),d1
	mov d1,8(a3)
	add $12,a3
	mov 8(a1),a0
	mov d0,a1
	jmp *a2

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_5:
	movl $__indirection,(a1)
	mov 4(a1),d0
	mov a0,(a3)
	mov a0,4(a1)
	mov 20(a1),d1
	mov d1,4(a3)
	mov 16(a1),d1
	mov d1,8(a3)
	mov 12(a1),d1
	mov d1,12(a3)
	add $16,a3
	mov 8(a1),a0
	mov d0,a1
	jmp *a2

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_6:
	movl $__indirection,(a1)
	mov 4(a1),d0
	mov a0,(a3)
	mov a0,4(a1)
	mov 24(a1),d1
	mov d1,4(a3)
	mov 20(a1),d1
	mov d1,8(a3)
	mov 16(a1),d1
	mov d1,12(a3)
	mov 12(a1),d1
	mov d1,16(a3)
	add $20,a3
	mov 8(a1),a0
	mov d0,a1
	jmp *a2

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_7:
/ generic loop entry: d0 = extra loop iterations (N-7), d1 = byte offset
/ of the last fixed argument (4*(N-1))
	mov $0,d0
	mov $20,d1
eval_upd_n:
	movl $__indirection,(a1)
	push 4(a1)
	mov a0,(a3)
	mov a0,4(a1)
	add d1,a1
	mov 8(a1),d1
	mov d1,4(a3)
	mov 4(a1),d1
	mov d1,8(a3)
	mov (a1),d1
	mov d1,12(a3)
	add $16,a3

eval_upd_n_lp:
	mov -4(a1),d1
	sub $4,a1
	mov d1,(a3)
	add $4,a3
	sub $1,d0
	jnc eval_upd_n_lp

	mov -4(a1),d1
	mov d1,(a3)
	add $4,a3
	mov -8(a1),a0
	pop a1
	jmp *a2
+
/ eval_upd_8 .. eval_upd_32: thin dispatchers into eval_upd_n with
/ d0 = N-7 (extra loop iterations) and d1 = 4*(N-1) (last fixed-arg offset)
#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_8:
	mov $1,d0
	mov $24,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_9:
	mov $2,d0
	mov $28,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_10:
	mov $3,d0
	mov $32,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_11:
	mov $4,d0
	mov $36,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_12:
	mov $5,d0
	mov $40,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_13:
	mov $6,d0
	mov $44,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_14:
	mov $7,d0
	mov $48,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_15:
	mov $8,d0
	mov $52,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_16:
	mov $9,d0
	mov $56,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_17:
	mov $10,d0
	mov $60,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_18:
	mov $11,d0
	mov $64,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_19:
	mov $12,d0
	mov $68,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_20:
	mov $13,d0
	mov $72,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_21:
	mov $14,d0
	mov $76,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_22:
	mov $15,d0
	mov $80,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_23:
	mov $16,d0
	mov $84,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_24:
	mov $17,d0
	mov $88,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_25:
	mov $18,d0
	mov $92,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_26:
	mov $19,d0
	mov $96,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_27:
	mov $20,d0
	mov $100,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_28:
	mov $21,d0
	mov $104,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_29:
	mov $22,d0
	mov $108,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_30:
	mov $23,d0
	mov $112,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_31:
	mov $24,d0
	mov $116,d1
	jmp eval_upd_n

#ifdef PROFILE
	call profile_n
	movl d0,a2
#endif
eval_upd_32:
	mov $25,d0
	mov $120,d1
	jmp eval_upd_n
+
+/
+/ STRINGS
+/
+
	section (catAC)
/ string concatenation: a0 ++ a1 -> a0.
/ String node layout: descriptor, length (bytes) at 4, data from 8.
/ Allocates a new __STRING__ node; may trigger a GC (collect_2l keeps
/ a0 and a1 alive).
catAC:
	mov 4(a0),a2
	add 4(a1),a2
	add $8+3,a2
	shr $2,a2

	movl free_heap_offset(a4),d0
	lea -32(d0,a2,4),a2
	cmpl end_heap_offset(a4),a2
	jae gc_3
gc_r_3:
/ a4 is repurposed as the write pointer; the globals base is saved on sp
	push a4
	movl free_heap_offset(a4),a4
	push a4

	mov 4(a0),d0
	mov 4(a1),d1
	add $8,a0
	add $8,a1

	movl $__STRING__+2,(a4)

/ store length

	mov d0,a2
	add d1,a2
	mov a2,4(a4)
	add $8,a4

/ copy string 1 (rounded up to whole words) with rep movsl

	lea 3(d1),a2
	shr $2,a2
	add a4,d1

	xchg a2,%ecx
	xchg a1,%esi
	cld
	rep
	movsl
	mov a1,%esi
	mov a2,%ecx

	mov d1,a4

/ copy_string 2 (word part, then trailing 2-byte and 1-byte remainders)

cat_string_6:
	mov d0,a2
	shr $2,a2
	je cat_string_9

cat_string_7:
	mov (a0),d1
	add $4,a0
	mov d1,(a4)
	add $4,a4
	dec a2
	jne cat_string_7

cat_string_9:
	testb $2,d0b
	je cat_string_10
	movw (a0),d1w
	add $2,a0
	movw d1w,(a4)
	add $2,a4
cat_string_10:
	testb $1,d0b
	je cat_string_11
	movb (a0),d1b
	movb d1b,(a4)
	inc a4
cat_string_11:
/ round the free pointer up to a word boundary and restore a4/a0
	lea 3(a4),a2
	pop a0
	pop a4
	andl $-4,a2
	mov a2,free_heap_offset(a4)
	ret

gc_3: call collect_2l
	jmp gc_r_3

/ shared result for degenerate slices: the statically allocated ""
empty_string:
	movl $zero_length_string,a0
	ret
+
	section (sliceAC)
/ string slice a0[d1..d0] (inclusive bounds, clamped to the string);
/ returns a fresh __STRING__ node in a0, or the shared empty string
sliceAC:
	mov 4(a0),a2
	test d1,d1
	jns slice_string_1
	xorl d1,d1
slice_string_1:
	cmp a2,d1
	jge empty_string
	cmp d1,d0
	jl empty_string
	inc d0
	cmp a2,d0
	jle slice_string_2
	mov a2,d0
slice_string_2:
	sub d1,d0

/ d0 = slice length; allocate 8 header bytes + length, word-rounded
	lea 8+3(d0),a2
	shr $2,a2

	movl free_heap_offset(a4),a1
	lea -32(a1,a2,4),a1
	cmpl end_heap_offset(a4),a1
	jae gc_4
r_gc_4:
/ a4 becomes the destination pointer; the globals base is kept in d1
	lea 8(a0,d1),a1
	movl a4,d1
	movl free_heap_offset(a4),a4
	sub $2,a2

	movl $__STRING__+2,(a4)
	mov d0,4(a4)

/ copy part of string
	mov a2,%ecx
	movl a4,a2
	add $8,a4

	xchg a1,%esi
	cld
	rep
	movsl
	mov a1,%esi
	mov a2,a0

	movl a4,free_heap_offset(d1)
	movl d1,a4
	ret

gc_4:
	movl a1,a2
	call collect_1l
	lea 8+3(d0),a2
	shr $2,a2
	jmp r_gc_4
+
	section (updateAC)
/ functional string update: copy of a0 with byte d1 replaced by d0b;
/ index must satisfy 0 <= d1 < length, otherwise a fatal index error
updateAC:
	mov 4(a0),a2
	cmp a2,d1
	jae update_string_error

	add $8+3,a2
	shr $2,a2

	movl free_heap_offset(a4),a1
	lea -32(a1,a2,4),a2
	cmpl end_heap_offset(a4),a2
	jae gc_5
r_gc_5:
	lea 8(a0),a1
	mov 4(a0),a0

/ a4 becomes the destination pointer; globals base saved in a2
	movl a4,a2
	movl free_heap_offset(a4),a4

	movl $__STRING__+2,(a4)
	mov a0,4(a4)
	add $8,a4

/ d1 = address of the byte to overwrite in the copy
	add a4,d1

	add $3,a0
	shr $2,a0

	xchg a1,%esi
	cld
	rep
	movsl
	mov a1,%esi

	movb d0b,(d1)

	movl free_heap_offset(a2),a0
	movl a4,free_heap_offset(a2)
	movl a2,a4
	ret

gc_5: call collect_1l
	jmp r_gc_5

/ unsigned compare failed: distinguish negative index from too-large index
update_string_error:
	movl $high_index_string,a2
	test d0,d0
	jns update_string_error_2
	movl $low_index_string,a2
update_string_error_2:
	jmp print_error
+
	section (eqAC)
/ string equality: a0 == a1 -> d0 (1/0); compares length, then whole
/ words, then the 2-byte and 1-byte remainders
eqAC:
	mov 4(a0),d0
	cmp 4(a1),d0
	jne equal_string_ne
	add $8,a0
	add $8,a1
	mov d0,d1
	andl $3,d1
	shr $2,d0
	je equal_string_b
equal_string_1:
	mov (a0),a2
	cmp (a1),a2
	jne equal_string_ne
	add $4,a0
	add $4,a1
	dec d0
	jne equal_string_1
equal_string_b:
	testb $2,d1b
	je equal_string_2
	movw (a0),d0w
	cmpw (a1),d0w
	jne equal_string_ne
	add $2,a0
	add $2,a1
equal_string_2:
	testb $1,d1b
	je equal_string_eq
	movb (a0),d1b
	cmpb (a1),d1b
	jne equal_string_ne
equal_string_eq:
	mov $1,d0
	ret
equal_string_ne:
	xorl d0,d0
	ret
+
	section (cmpAC)
/ lexicographic string compare a1 vs a0 -> d0 in {-1,0,1}.
/ d0 is preloaded with the result implied by the length difference and is
/ only overwritten when a differing byte is found in the common prefix
/ (d1 = number of common-prefix bytes still to compare).
cmpAC:
	mov 4(a0),d1
	mov 4(a1),a2
	add $8,a0
	add $8,a1
	cmp d1,a2
	jb cmp_string_less
	ja cmp_string_more
	xorl d0,d0
	jmp cmp_string_chars
cmp_string_more:
	mov $1,d0
	jmp cmp_string_chars
cmp_string_less:
	mov $-1,d0
	mov a2,d1
	jmp cmp_string_chars

cmp_string_1:
	mov (a1),a2
	cmp (a0),a2
	jne cmp_string_ne4
	add $4,a1
	add $4,a0
cmp_string_chars:
	sub $4,d1
	jnc cmp_string_1
cmp_string_b:
	testb $2,d1b
	je cmp_string_2
	movb (a1),%bh
	cmpb (a0),%bh
	jne cmp_string_ne
	movb 1(a1),%bh
	cmpb 1(a0),%bh
	jne cmp_string_ne
	add $2,a1
	add $2,a0
cmp_string_2:
	testb $1,d1b
	je cmp_string_eq
	movb (a1),d1b
	cmpb (a0),d1b
	jne cmp_string_ne
cmp_string_eq:
	ret
/ a whole word differed: redo it bytewise to find the first differing byte
/ (byte order matters for the ordering result)
cmp_string_ne4:
	movb (a1),d1b
	cmpb (a0),d1b
	jne cmp_string_ne
	movb 1(a1),d1b
	cmpb 1(a0),d1b
	jne cmp_string_ne
	movb 2(a1),d1b
	cmpb 2(a0),d1b
	jne cmp_string_ne
	movb 3(a1),d1b
	cmpb 3(a0),d1b
cmp_string_ne:
	ja cmp_string_r1
	mov $-1,d0
	ret
cmp_string_r1:
	mov $1,d0
	ret
+
	section (string_to_string_node)
/ build a Clean __STRING__ node from a raw (length, bytes) pair at a0
/ (length word at (a0), data following); returns the node in a0
string_to_string_node:
	movl (a0),d0
	addl $4,a0

	lea 3(d0),d1
	shr $2,d1

	movl free_heap_offset(a4),a1
	lea -32+8(a1,d1,4),a2
	cmpl end_heap_offset(a4),a2
	jae string_to_string_node_gc

string_to_string_node_r:
	movl free_heap_offset(a4),a2
	movl a2,a1
	movl $__STRING__+2,(a2)
	movl d0,4(a2)
	addl $8,a2
	jmp string_to_string_node_4

string_to_string_node_2:
	movl (a0),d0
	addl $4,a0
	movl d0,(a2)
	addl $4,a2
string_to_string_node_4:
	subl $1,d1
	jge string_to_string_node_2

	movl a2,free_heap_offset(a4)
	movl a1,a0
	ret

/ NOTE: a0 points outside the heap here, so a push (not collect_1l)
/ protects it across the collection
string_to_string_node_gc:
	push a0
	call collect_0l
	pop a0
	jmp string_to_string_node_r
+
	section (int_array_to_node)
/ build an unboxed INT __ARRAY__ node from a raw C array at a0
/ (element count at -8(a0)); returns the node in a0
int_array_to_node:
	movl -8(a0),d0

	movl free_heap_offset(a4),a2
	lea -32+12(a2,d0,4),a2
	cmpl end_heap_offset(a4),a2
	jae int_array_to_node_gc

int_array_to_node_r:
	movl free_heap_offset(a4),a2
	movl $__ARRAY__+2,(a2)
	movl a0,a1
	movl d0,4(a2)
	movl a2,a0
	movl $INT+2,8(a2)
	addl $12,a2
	jmp int_array_to_node_4

int_array_to_node_2:
	movl (a1),d1
	addl $4,a1
	movl d1,(a2)
	addl $4,a2
int_array_to_node_4:
	subl $1,d0
	jge int_array_to_node_2

	movl a2,free_heap_offset(a4)
	ret

int_array_to_node_gc:
	push a0
	call collect_0l
	pop a0
	jmp int_array_to_node_r
+
+ section (real_array_to_node)
+real_array_to_node:
+ movl -8(a0),d0
+
+ movl free_heap_offset(a4),a2
+ lea -32+12+4(a2,d0,8),a2
+ cmpl end_heap_offset(a2),a2
+ jae real_array_to_node_gc
+
+real_array_to_node_r:
+ movl free_heap_offset(a4),a2
+ orl $4,a2
+ movl a0,a1
+ movl $__ARRAY__+2,(a2)
+ movl d0,4(a2)
+ movl a2,a0
+ movl $REAL+2,8(a2)
+ addl $12,a2
+ jmp real_array_to_node_4
+
+real_array_to_node_2:
+ movl (a1),d1
+ movl d1,(a2)
+ movl 4(a1),d1
+ addl $8,a1
+ movl d1,4(a2)
+ addl $8,a2
+real_array_to_node_4:
+ subl $1,d0
+ jge real_array_to_node_2
+
+ movl a2,free_heap_offset(a4)
+ ret
+
+real_array_to_node_gc:
+ push a0
+ call collect_0l
+ pop a0
+ jmp real_array_to_node_r
+
/ statically allocated "cycle in spine" node descriptors _c3 .. _c32:
/ each is an arity word (.long N) followed by a code entry that reports
/ a cyclic spine when the node is ever evaluated
	align (2)
	.long 3
_c3: jmp __cycle__in__spine
	align (2)

	.long 4
_c4: jmp __cycle__in__spine
	align (2)
	.long 5
_c5: jmp __cycle__in__spine
	align (2)
	.long 6
_c6: jmp __cycle__in__spine
	align (2)
	.long 7
_c7: jmp __cycle__in__spine
	align (2)
	.long 8
_c8: jmp __cycle__in__spine
	align (2)
	.long 9
_c9: jmp __cycle__in__spine
	align (2)
	.long 10
_c10: jmp __cycle__in__spine
	align (2)
	.long 11
_c11: jmp __cycle__in__spine
	align (2)
	.long 12
_c12: jmp __cycle__in__spine
	align (2)
	.long 13
_c13: jmp __cycle__in__spine
	align (2)
	.long 14
_c14: jmp __cycle__in__spine
	align (2)
	.long 15
_c15: jmp __cycle__in__spine
	align (2)
	.long 16
_c16: jmp __cycle__in__spine
	align (2)
	.long 17
_c17: jmp __cycle__in__spine
	align (2)
	.long 18
_c18: jmp __cycle__in__spine
	align (2)
	.long 19
_c19: jmp __cycle__in__spine
	align (2)
	.long 20
_c20: jmp __cycle__in__spine
	align (2)
	.long 21
_c21: jmp __cycle__in__spine
	align (2)
	.long 22
_c22: jmp __cycle__in__spine
	align (2)
	.long 23
_c23: jmp __cycle__in__spine
	align (2)
	.long 24
_c24: jmp __cycle__in__spine
	align (2)
	.long 25
_c25: jmp __cycle__in__spine
	align (2)
	.long 26
_c26: jmp __cycle__in__spine
	align (2)
	.long 27
_c27: jmp __cycle__in__spine
	align (2)
	.long 28
_c28: jmp __cycle__in__spine
	align (2)
	.long 29
_c29: jmp __cycle__in__spine
	align (2)
	.long 30
_c30: jmp __cycle__in__spine
	align (2)
	.long 31
_c31: jmp __cycle__in__spine
	align (2)
	.long 32
_c32: jmp __cycle__in__spine
+
+/
+/ ARRAYS
+/
+
/ _create_arrayX: allocate an UNINITIALIZED unboxed array of d0 elements
/ of type X (B=bool bytes, C=char string, I=int, R=real); result in a0.
/ Each first attempts the allocation and collects when end_heap would be
/ crossed.
_create_arrayB:
	movl d0,d1
	addl $3,d0
	shr $2,d0

	movl free_heap_offset(a4),a0
	lea -32+12(a0,d0,4),a2
	cmpl end_heap_offset(a4),a2
	jb no_collect_4574
	call collect_0l
	movl free_heap_offset(a4),a0
no_collect_4574:
	movl $__ARRAY__+2,(a0)
	movl d1,4(a0)
	movl $BOOL+2,8(a0)
	lea 12(a0,d0,4),a2
	movl a2,free_heap_offset(a4)
	ret

/ a character array is represented as a plain __STRING__ node
_create_arrayC:
	movl d0,d1
	addl $3,d0
	shr $2,d0

	movl free_heap_offset(a4),a0
	lea -32+8(a0,d0,4),a2
	cmpl end_heap_offset(a4),a2
	jb no_collect_4573
	call collect_0l
	movl free_heap_offset(a4),a0
no_collect_4573:
	movl $__STRING__+2,(a0)
	movl d1,4(a0)
	lea 8(a0,d0,4),a2
	movl a2,free_heap_offset(a4)
	ret

_create_arrayI:
	movl free_heap_offset(a4),a0
	lea -32+12(a0,d0,4),a2
	cmpl end_heap_offset(a4),a2
	jb no_collect_4572
	call collect_0l
	movl free_heap_offset(a4),a0
no_collect_4572:
	movl $__ARRAY__+2,(a0)
	movl d0,4(a0)
	movl $INT+2,8(a0)
	lea 12(a0,d0,4),a2
	movl a2,free_heap_offset(a4)
	ret

_create_arrayR:
	movl free_heap_offset(a4),a0
	lea -32+12+4(a0,d0,8),a2
	cmpl end_heap_offset(a4),a2
	jb no_collect_4580
	call collect_0l
	movl free_heap_offset(a4),a0
no_collect_4580:
/ odd-word placement so the 8-byte reals after the 12-byte header align
	orl $4,a0
	movl $__ARRAY__+2,(a0)
	movl d0,4(a0)
	movl $REAL+2,8(a0)
	lea 12(a0,d0,8),a2
	movl a2,free_heap_offset(a4)
	ret
+
/ 4(sp): number of elements, (sp): element descriptor
/ d0: element size, d1: element a size, a0: a_element -> a0: array
+
/ create an array of records, every element initialized from the template
/ pointer a2 (a_element). Dispatches on d1 (pointers per element) to a
/ specialized fill loop; d0 = total element size in words.
_create_r_array:
	movl 4(sp),a1

	pushl d0

/ compute the allocation end: 12 header bytes + n_elements * elem_size
	shl $2,a1
	movl free_heap_offset(a4),a2
	lea 12-32(a2),a2
_sub_size_lp:
	addl a1,a2
	subl $1,d0
	jne _sub_size_lp

	popl d0

	cmpl end_heap_offset(a4),a2
	jb no_collect_4586
	call collect_1l
no_collect_4586:
	movl a0,a2

	movl free_heap_offset(a4),a0

/ element descriptor and element count come from the caller's stack
	popl 8(a0)
	pop a1

	movl $__ARRAY__+2,(a0)
	movl a1,4(a0)
	addl $12,a0

/ a1: number of elements, a0: array
/ d0: element size, d1: element a size a2:a_element

	test d1,d1
	je _create_r_array_0
	subl $2,d1
	jc _create_r_array_1
	je _create_r_array_2
	subl $2,d1
	jc _create_r_array_3
	je _create_r_array_4
	jmp _create_r_array_5

/ 0 pointers per element: nothing to initialize, just advance free_heap
_create_r_array_0:
	movl free_heap_offset(a4),d1
	shl $2,a1
	jmp _st_fillr0_array
_fillr0_array:
	addl a1,a0
_st_fillr0_array:
	subl $1,d0
	jnc _fillr0_array
	movl a0,free_heap_offset(a4)
	movl d1,a0
	ret

/ 1 pointer per element: store the template once per element stride d0
_create_r_array_1:
	movl free_heap_offset(a4),d1
	shl $2,d0
	jmp _st_fillr1_array
_fillr1_array:
	movl a2,(a0)
	addl d0,a0
_st_fillr1_array:
	subl $1,a1
	jnc _fillr1_array
	movl a0,free_heap_offset(a4)
	movl d1,a0
	ret

_create_r_array_2:
	movl free_heap_offset(a4),d1
	shl $2,d0
	jmp _st_fillr2_array
_fillr2_array:
	movl a2,(a0)
	movl a2,4(a0)
	addl d0,a0
_st_fillr2_array:
	subl $1,a1
	jnc _fillr2_array
	movl a0,free_heap_offset(a4)
	movl d1,a0
	ret
+
+/ three pointer fields per record.
+/ a1 = element count, a0 = first element, d0 = element size (words),
+/ a2 = pointer stored in each field -> a0 = array node
+_create_r_array_3:
+	movl free_heap_offset(a4),d1
+	shl $2,d0
+	jmp _st_fillr3_array
+_fillr3_array:
+	movl a2,(a0)
+	movl a2,4(a0)
+	movl a2,8(a0)
+	addl d0,a0
+_st_fillr3_array:
+	subl $1,a1
+	jnc _fillr3_array
+/ a0 now points past the last record: make it the new heap top and return
+/ the node saved in d1, exactly as _create_r_array_1/2/4 do.
+/ bug fix: the previous code first reloaded free_heap_offset into a0,
+/ clobbering the advanced fill pointer, so the heap top was never moved
+/ past the new array.
+	movl a0,free_heap_offset(a4)
+	movl d1,a0
+	ret
+
+/ four pointer fields per record (same calling convention as _3)
+_create_r_array_4:
+	movl free_heap_offset(a4),d1
+	shl $2,d0
+	jmp _st_fillr4_array
+_fillr4_array:
+	movl a2,(a0)
+	movl a2,4(a0)
+	movl a2,8(a0)
+	movl a2,12(a0)
+	addl d0,a0
+_st_fillr4_array:
+	subl $1,a1
+	jnc _fillr4_array
+	movl a0,free_heap_offset(a4)
+	movl d1,a0
+	ret
+
+/ more than four pointer fields per record; the dispatch in _create_r_array
+/ already subtracted 4, so here d1 = number of pointer fields - 4
+_create_r_array_5:
+	push a3
+
+/ a3 = extra pointer fields - 1 (inner loop count),
+/ d0 = (element size - pointer fields) * 4 = bytes of non-pointer data to skip
+	movl d1,a3
+	subl $4,d0
+	subl d1,d0
+
+	subl $1,a3
+	shl $2,d0
+	jmp _st_fillr5_array
+_fillr5_array:
+	movl a2,(a0)
+	movl a2,4(a0)
+	movl a2,8(a0)
+	movl a2,12(a0)
+	addl $16,a0
+
+	movl a3,d1
+_copy_elem_5_lp:
+	movl a2,(a0)
+	addl $4,a0
+	subl $1,d1
+	jnc _copy_elem_5_lp
+
+	addl d0,a0
+_st_fillr5_array:
+	subl $1,a1
+	jnc _fillr5_array
+
+	pop a3
+
+	movl free_heap_offset(a4),d1
+	movl a0,free_heap_offset(a4)
+	movl d1,a0
+	ret
+
+/ create_arrayB: allocate a BOOL array of d1 elements initialised to the
+/ byte in d0.  The byte is replicated to all four byte lanes of d0 and the
+/ word fill is shared with create_arrayC/I via create_arrayBCI.
+create_arrayB:
+	movl d1,a1
+/ d1 = number of 32-bit words = (elements + 3) / 4
+	addl $3,d1
+	shr $2,d1
+
+	movl free_heap_offset(a4),a0
+	lea -32+12(a0,d1,4),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4575
+
+	pushl a1
+	call collect_0l
+	popl a1
+	movl free_heap_offset(a4),a0
+
+no_collect_4575:
+/ replicate the low byte of d0 into all four bytes
+	movl d0,a2
+	shl $8,a2
+	orl a2,d0
+	movl d0,a2
+	shl $16,a2
+	orl a2,d0
+	movl $__ARRAY__+2,(a0)
+	movl a1,4(a0)
+	movl $BOOL+2,8(a0)
+	lea 12(a0),a2
+	jmp create_arrayBCI
+
+/ create_arrayC: allocate a string of d1 characters initialised to the byte
+/ in d0.  Strings have an 8 byte header (descriptor + length), hence -32+8.
+create_arrayC:
+	movl d1,a1
+	addl $3,d1
+	shr $2,d1
+
+	movl free_heap_offset(a4),a0
+	lea -32+8(a0,d1,4),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4578
+
+	pushl a1
+	call collect_0l
+	popl a1
+	movl free_heap_offset(a4),a0
+
+no_collect_4578:
+	movl d0,a2
+	shl $8,a2
+	orl a2,d0
+	movl d0,a2
+	shl $16,a2
+	orl a2,d0
+	movl $__STRING__+2,(a0)
+	movl a1,4(a0)
+	lea 8(a0),a2
+	jmp create_arrayBCI
+
+/ create_arrayI: allocate an INT array of d1 elements, each initialised
+/ to d0; falls through into create_arrayBCI to fill d1 words with d0.
+create_arrayI:
+	movl free_heap_offset(a4),a0
+	lea -32+12(a0,d1,4),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4577
+
+	call collect_0l
+	movl free_heap_offset(a4),a0
+
+no_collect_4577:
+	movl $__ARRAY__+2,(a0)
+	movl d1,4(a0)
+	movl $INT+2,8(a0)
+	lea 12(a0),a2
+/ (a dead "lea 0(,d1,4),a1" was removed here: create_arrayBCI immediately
+/ overwrites a1 with d1 before a1 could be read)
+/ create_arrayBCI: shared fill tail of create_arrayB/C/I - store d1 words
+/ of d0 at a2, one word first if d1 is odd, then two per iteration, and
+/ advance the heap top.
+create_arrayBCI:
+	mov d1,a1
+	shr $1,d1
+	testb $1,a1b
+	je st_filli_array
+
+	movl d0,(a2)
+	addl $4,a2
+	jmp st_filli_array
+
+filli_array:
+	movl d0,(a2)
+	movl d0,4(a2)
+	addl $8,a2
+st_filli_array:
+	subl $1,d1
+	jnc filli_array
+
+	movl a2,free_heap_offset(a4)
+	ret
+
+/ create_arrayR: allocate a REAL array of d0 elements initialised to the
+/ double on the x87 stack top.
+create_arrayR:
+/ spill the initial value below sp and pick it up as two words (d1 = low,
+/ a1 = high) before any call can touch that area
+	fstl -8(sp)
+
+	movl free_heap_offset(a4),a0
+	lea -32+12+4(a0,d0,8),a2
+
+	movl -8(sp),d1
+	movl -4(sp),a1
+
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4579
+	pushl a1
+	call collect_0l
+	popl a1
+	movl free_heap_offset(a4),a0
+no_collect_4579:
+/ force a0 = 4 (mod 8) so the doubles at offset 12 are 8-byte aligned
+	orl $4,a0
+
+	movl $__ARRAY__+2,(a0)
+	movl d0,4(a0)
+	movl $REAL+2,8(a0)
+	movl a0,a2
+	addl $12,a2
+	jmp st_fillr_array
+fillr_array:
+	movl d1,(a2)
+	movl a1,4(a2)
+	addl $8,a2
+st_fillr_array:
+	subl $1,d0
+	jnc fillr_array
+
+	movl a2,free_heap_offset(a4)
+	ret
+
+/ create_array: allocate a boxed array of d0 elements, each initialised to
+/ the node in a0.  Element descriptor 0 marks a plain pointer array.
+/ Reuses the word-fill loop fillr1_array of create_R_array_1.
+create_array:
+	movl free_heap_offset(a4),a1
+	lea -32+12(a1,d0,4),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4576
+
+	call collect_1l
+	movl free_heap_offset(a4),a1
+
+no_collect_4576:
+/ d1 = element to store, a0 = array node to return
+	movl a0,d1
+	movl a1,a0
+	movl $__ARRAY__+2,(a1)
+	movl d0,4(a1)
+	movl $0,8(a1)
+	lea 12(a1),a2
+	jmp fillr1_array
+
+/ create_R_array: create an unboxed record array, replicating one initial
+/ element whose pointer fields are on the A-stack (a3) and whose basic
+/ fields are on the C-stack.  Dispatches on element size in d0.
+/ in 4(sp): number of elements, (sp): element descriptor
+/ d0: element size, d1: element a size -> a0: array
+
+create_R_array:
+	subl $2,d0
+	jc create_R_array_1
+	je create_R_array_2
+	subl $2,d0
+	jc create_R_array_3
+	je create_R_array_4
+	jmp create_R_array_5
+
+/ one-word records
+create_R_array_1:
+	pop a1
+	pop d0
+
+/ d0: number of elements, a1: element descriptor
+/ d1: element a size
+
+	movl free_heap_offset(a4),a0
+	lea -32+12(a0,d0,4),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4581
+
+	pushl a1
+	call collect_0l
+	popl a1
+	movl free_heap_offset(a4),a0
+
+no_collect_4581:
+	movl $__ARRAY__+2,(a0)
+	movl d0,4(a0)
+	movl a1,8(a0)
+	lea 12(a0),a2
+
+/ the single field is a pointer (take it from the A-stack) or a basic
+/ value (take it from the C-stack, below the return address)
+	test d1,d1
+	je r_array_1_b
+
+	movl -4(a3),d1
+	jmp fillr1_array
+
+r_array_1_b:
+	movl 4(sp),d1
+
+/ fillr1_array: store d0 copies of the word d1 at a2 (one word first when
+/ d0 is odd, then two per iteration) and advance the heap top.
+/ Also used by create_array.
+fillr1_array:
+	movl d0,a1
+	shr $1,d0
+	testb $1,a1b
+	je st_fillr1_array_1
+
+	movl d1,(a2)
+	addl $4,a2
+	jmp st_fillr1_array_1
+
+fillr1_array_lp:
+	movl d1,(a2)
+	movl d1,4(a2)
+	addl $8,a2
+st_fillr1_array_1:
+	subl $1,d0
+	jnc fillr1_array_lp
+
+	movl a2,free_heap_offset(a4)
+	ret
+
+/ two-word records.  d1 = element a size selects where the two initial
+/ words come from: 2 pointers from the A-stack (aa), pointer + basic (ab),
+/ or two basic words from the C-stack (bb).
+create_R_array_2:
+	pop a1
+	pop d0
+
+/ d0: number of elements, a1: element descriptor
+/ d1: element a size
+	movl free_heap_offset(a4),a0
+	lea -32+12(a0,d0,8),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4582
+
+	pushl a1
+/ bug fix: this called collect_0 instead of the thread-local collector
+/ collect_0l used by every other allocation path in this file
+	call collect_0l
+	popl a1
+	movl free_heap_offset(a4),a0
+
+no_collect_4582:
+	movl $__ARRAY__+2,(a0)
+	movl d0,4(a0)
+	movl a1,8(a0)
+	lea 12(a0),a2
+
+/ d1 = first word, a1 = second word of the element to replicate
+	cmpl $1,d1
+	jc r_array_2_bb
+	movl -4(a3),d1
+	je r_array_2_ab
+r_array_2_aa:
+	movl -8(a3),a1
+	jmp st_fillr2_array
+r_array_2_ab:
+	movl 4(sp),a1
+	jmp st_fillr2_array
+r_array_2_bb:
+	movl 4(sp),d1
+	movl 8(sp),a1
+	jmp st_fillr2_array
+
+fillr2_array_1:
+	movl d1,(a2)
+	movl a1,4(a2)
+	addl $8,a2
+st_fillr2_array:
+	subl $1,d0
+	jnc fillr2_array_1
+
+	movl a2,free_heap_offset(a4)
+	ret
+
+/ three-word records; a0/a1/d1 hold the three initial words, gathered
+/ from the A-stack (pointers) and C-stack (basic values) per d1's a size
+create_R_array_3:
+	pop a1
+	pop d0
+
+/ d0: number of elements, a1: element descriptor
+/ d1: element a size
+
+	movl free_heap_offset(a4),a0
+	lea -32+12(a0,d0,8),a2
+	lea (a2,d0,4),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4583
+
+	pushl a1
+	call collect_0l
+	popl a1
+	movl free_heap_offset(a4),a0
+
+no_collect_4583:
+	movl $__ARRAY__+2,(a0)
+	movl d0,4(a0)
+	movl a1,8(a0)
+	lea 12(a0),a2
+
+/ case labels: a = word comes from the A-stack, b = from the C-stack
+	cmpl $1,d1
+	jc r_array_3_bbb
+	movl -4(a3),a0
+	je r_array_3_abb
+	movl -8(a3),a1
+	cmpl $2,d1
+	je r_array_3_aab
+r_array_3_aaa:
+	movl -12(a3),d1
+	jmp st_fillr3_array
+r_array_3_aab:
+	movl (sp),d1
+	jmp st_fillr3_array
+r_array_3_abb:
+	movl (sp),a1
+	movl 4(sp),d1
+	jmp st_fillr3_array
+r_array_3_bbb:
+	movl (sp),a0
+	movl 4(sp),a1
+	movl 8(sp),d1
+	jmp st_fillr3_array
+
+fillr3_array_1:
+	movl a0,(a2)
+	movl a1,4(a2)
+	movl d1,8(a2)
+	addl $12,a2
+st_fillr3_array:
+	subl $1,d0
+	jnc fillr3_array_1
+
+/ reload the (not yet advanced) heap top as the array node to return,
+/ then advance the heap top past the fill
+	movl free_heap_offset(a4),a0
+	movl a2,free_heap_offset(a4)
+	ret
+
+/ four-word records; the fourth word is kept in a3, which is saved and
+/ restored around the fill (note: (sp) offsets below include the push)
+create_R_array_4:
+	pop a1
+	pop d0
+
+/ d0: number of elements, a1: element descriptor
+/ d1: element a size
+
+	movl d0,a2
+	shl $4,a2
+	movl free_heap_offset(a4),a0
+	lea -32+12(a0,a2),a2
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4584
+
+	pushl a1
+	call collect_0l
+	popl a1
+	movl free_heap_offset(a4),a0
+
+no_collect_4584:
+	movl $__ARRAY__+2,(a0)
+	movl d0,4(a0)
+	movl a1,8(a0)
+	lea 12(a0),a2
+
+	push a3
+
+	cmp $1,d1
+	jc r_array_4_bbbb
+	movl -4(a3),a0
+	je r_array_4_abbb
+	movl -8(a3),a1
+	cmp $3,d1
+	jc r_array_4_aabb
+	movl -12(a3),d1
+	je r_array_4_aaab
+r_array_4_aaaa:
+	movl -16(a3),a3
+	jmp st_fillr4_array
+r_array_4_aaab:
+	movl (sp),a3
+	jmp st_fillr4_array
+r_array_4_aabb:
+	movl 0(sp),d1
+	movl 4(sp),a3
+	jmp st_fillr4_array
+r_array_4_abbb:
+	movl (sp),a1
+	movl 4(sp),d1
+	movl 8(sp),a3
+	jmp st_fillr4_array
+r_array_4_bbbb:
+	movl (sp),a0
+	movl 4(sp),a1
+	movl 8(sp),d1
+	movl 12(sp),a3
+	jmp st_fillr4_array
+
+fillr4_array:
+	movl a0,(a2)
+	movl a1,4(a2)
+	movl d1,8(a2)
+	movl a3,12(a2)
+	addl $16,a2
+st_fillr4_array:
+	subl $1,d0
+	jnc fillr4_array
+
+	pop a3
+
+	movl free_heap_offset(a4),a0
+	movl a2,free_heap_offset(a4)
+	ret
+
+/ records of more than four words; the dispatch already subtracted 4,
+/ so d0 = element size - 4.  The element's pointer words are copied from
+/ the A-stack onto the C-stack, the return address is parked on the
+/ A-stack (with sp), and the fill reads the whole element from the stack.
+create_R_array_5:
+	pop a1
+	pop a0
+
+/ a0: number of elements, a1: element descriptor
+/ d0: element size-4, d1: element a size
+
+/ a2 = end of node = free_heap + 12 - 32 + count*4*element size
+	movl a0,a2
+	shl $4,a2
+	add $12-32,a2
+	addl free_heap_offset(a4),a2
+
+	subl $1,d0
+
+	pushl d0
+sub_size_lp:
+	lea (a2,a0,4),a2
+	subl $1,d0
+	jnc sub_size_lp
+	popl d0
+
+	cmpl end_heap_offset(a4),a2
+	jb no_collect_4585
+
+	pushl a1
+	pushl a0
+	call collect_0l
+	popl a0
+	popl a1
+
+no_collect_4585:
+	movl free_heap_offset(a4),a2
+
+	movl $__ARRAY__+2,(a2)
+	movl a0,4(a2)
+	movl a1,8(a2)
+
+/ park the caller's return address and current sp above the A-stack top;
+/ the routine returns through them at the end
+	popl a1
+	movl sp,4(a3)
+	movl a1,(a3)
+
+	test d1,d1
+	je r_array_5
+
+/ push the d1 pointer words of the element (from a3-4*d1 upwards) onto
+/ the C-stack, in front of the basic words already there
+	movl d1,a1
+	shl $2,a1
+	negl a1
+	addl a3,a1
+	subl $1,d1
+
+copy_a_to_b_lp5:
+	pushl (a1)
+	addl $4,a1
+	subl $1,d1
+	jnc copy_a_to_b_lp5
+
+r_array_5:
+	addl $12,a2
+
+	pushl a3
+	jmp st_fillr5_array
+
+/ per element: copy four words, then element size-4 more, from the stack
+fillr5_array_1:
+	movl 4(sp),d1
+	movl d1,(a2)
+	movl 8(sp),d1
+	movl d1,4(a2)
+
+	movl 12(sp),d1
+	movl d1,8(a2)
+	movl 16(sp),d1
+	movl d1,12(a2)
+
+	lea 20(sp),a3
+	addl $16,a2
+	movl d0,a1
+
+copy_elem_lp5:
+	movl (a3),d1
+	addl $4,a3
+	movl d1,(a2)
+	addl $4,a2
+	subl $1,a1
+	jnc copy_elem_lp5
+
+st_fillr5_array:
+	subl $1,a0
+	jnc fillr5_array_1
+
+	popl a3
+
+	movl free_heap_offset(a4),a0
+	movl a2,free_heap_offset(a4)
+
+/ restore sp and return through the address parked on the A-stack
+	movl 4(a3),sp
+	jmp *(a3)
+
+/ repl_args_b: push d0 arguments of node a0 onto the A-stack (a3), last
+/ argument first.  d1 appears to be the node's total arity: for arity 2
+/ the second argument is stored in the node at 8(a0), otherwise 8(a0)
+/ points to an argument block - TODO confirm against callers.
+repl_args_b:
+	test d0,d0
+	jle repl_args_b_1
+
+	dec d0
+	je repl_args_b_4
+
+	mov 8(a0),a1
+	sub $2,d1
+	jne repl_args_b_2
+
+	mov a1,(a3)
+	add $4,a3
+	jmp repl_args_b_4
+
+/ copy d0 arguments from the argument block a1, highest index first
+repl_args_b_2:
+	lea (a1,d0,4),a1
+
+repl_args_b_3:
+	mov -4(a1),a2
+	sub $4,a1
+	mov a2,(a3)
+	add $4,a3
+	dec d0
+	jne repl_args_b_3
+
+repl_args_b_4:
+	mov 4(a0),a2
+	mov a2,(a3)
+	add $4,a3
+repl_args_b_1:
+	ret
+
+/ push_arg_b: fetch argument number d1 of node a0 (d0 = node arity).
+/ Argument 1 lives at 4(a0); the last argument of an arity-2 node lives
+/ at 8(a0); other arguments live in the argument block at 8(a0).
+push_arg_b:
+	cmp $2,d1
+	jb push_arg_b_1
+	jne push_arg_b_2
+	cmp d0,d1
+	je push_arg_b_1
+push_arg_b_2:
+	mov 8(a0),a0
+	sub $2,d1
+push_arg_b_1:
+	mov (a0,d1,4),a0
+	ret
+
+/ del_args: build in a1 a copy of node a0 with d0 fewer arguments.
+/ The new descriptor is found at (a0)-d0 (descriptors are presumably laid
+/ out per arity - TODO confirm); its arity selects how the remaining
+/ arguments are stored: <2 directly, ==2 unpacked from the old argument
+/ block, >2 in a freshly allocated argument block.
+del_args:
+	mov (a0),d1
+	sub d0,d1
+	movswl -2(d1),d0
+	sub $2,d0
+	jge del_args_2
+
+	mov d1,(a1)
+	mov 4(a0),a2
+	mov a2,4(a1)
+	mov 8(a0),a2
+	mov a2,8(a1)
+	ret
+
+del_args_2:
+	jne del_args_3
+
+	mov d1,(a1)
+	mov 4(a0),a2
+	mov a2,4(a1)
+	mov 8(a0),a2
+	mov (a2),a2
+	mov a2,8(a1)
+	ret
+
+del_args_3:
+	movl free_heap_offset(a4),a2
+	lea -32(a2,d0,4),a2
+	cmpl end_heap_offset(a4),a2
+	jae del_args_gc
+del_args_r_gc:
+/ a4 is stored at 8(a1) only as a placeholder until the block is filled
+	mov d1,(a1)
+	mov a4,8(a1)
+	mov 4(a0),a2
+	mov 8(a0),a0
+	mov a2,4(a1)
+
+	movl free_heap_offset(a4),a2
+del_args_copy_args:
+	mov (a0),d1
+	add $4,a0
+	mov d1,(a2)
+	add $4,a2
+	sub $1,d0
+	jg del_args_copy_args
+
+	movl a2,free_heap_offset(a4)
+	ret
+
+del_args_gc:
+	call collect_2l
+	jmp del_args_r_gc
+
+#if 0
+/ disabled code (selector evaluation helpers), kept for reference only
+o__S_P2:
+	mov (a0),d0
+	mov 8(a0),a0
+	cmpw $2,-2(d0)
+	je o__S_P2_2
+	mov (a0),a0
+o__S_P2_2:
+	ret
+
+ea__S_P2:
+	mov 4(a1),d0
+	movl $__indirection,(a1)
+	mov a0,4(a1)
+	mov d0,a1
+	mov (a1),d0
+	testb $2,d0
+	jne ea__S_P2_1
+
+	mov a0,(a3)
+	add $4,a3
+	mov a1,a0
+	call *d0
+	mov a0,a1
+	mov -4(a3),a0
+	sub $4,a3
+
+ea__S_P2_1:
+	mov (a1),d0
+	mov 8(a1),a1
+	cmpw $2,-2(d0)
+	je ea__S_P2_2
+	mov (a1),a1
+ea__S_P2_2:
+	mov (a1),d0
+	testb $2,d0
+	jne ea__S_P2_3
+
+	sub $20,d0
+	jmp *d0
+
+ea__S_P2_3:
+	mov d0,(a0)
+	mov 4(a1),a2
+	mov a2,4(a0)
+	mov 8(a1),a2
+	mov a2,8(a0)
+	ret
+#endif
+
+#ifdef NOCLIB
+/ NOCLIB: hand-coded x87 implementations of the transcendental functions.
+/ Arguments and results are passed on the x87 register stack.
+
+/ tan: fptan pushes 1.0 on success; C2 (0x04 in the status high byte) set
+/ means the operand was out of range -> return NaN
+tan_real:
+	fptan
+	fstsw %ax
+	testb $0x04,%ah
+	fstp %st(0)
+	jnz tan_real_1
+	ret
+
+tan_real_1:
+	fldl NAN_real
+	fstp %st(1)
+	ret
+
+/ asin x = atan(x / sqrt(1 - x^2))
+asin_real:
+	fld %st(0)
+	fmul %st(0)
+	fsubrl one_real
+	fsqrt
+	fpatan
+	ret
+
+/ acos x = atan(sqrt(1 - x^2) / x)
+acos_real:
+	fld %st(0)
+	fmul %st(0)
+	fsubrl one_real
+	fsqrt
+	fxch %st(1)
+	fpatan
+	ret
+
+/ atan x = fpatan(x, 1.0)
+atan_real:
+	fldl one_real
+	fpatan
+	ret
+
+/ ln x = ln 2 * log2 x
+ln_real:
+	fldln2
+	fxch %st(1)
+	fyl2x
+	ret
+
+/ C-callable wrapper: argument double at 4(sp)
+@c_log10:
+	fldl 4(sp)
+/ log10 x = log10 2 * log2 x
+log10_real:
+	fldlg2
+	fxch %st(1)
+	fyl2x
+	ret
+
+/ exp x = 2^(x * log2 e); 8(sp) holds the saved control word, 10(sp) a
+/ copy with round-to-nearest replaced by round-down for frndint
+exp_real:
+	fldl2e
+	subl $16, sp
+	fmulp %st(1)
+
+	fstcw 8(sp)
+	movw 8(sp),%ax
+	andw $0xf3ff,%ax
+	orw $0x0400,%ax
+	movw %ax,10(sp)
+
+/ shared tail: 2^st(0) via integer/fraction split, f2xm1 and fscale;
+/ expects the two control words at 8(sp)/10(sp) and pops the 16 bytes
+exp2_real_:
+	fld %st
+	fldcw 10(sp)
+	frndint
+	fldcw 8(sp)
+
+	fsubr %st,%st(1)
+	fxch %st(1)
+	f2xm1
+	faddl one_real
+	fscale
+	addl $16,sp
+	fstp %st(1)
+
+	ret
+
+/ pow(x,y) = 2^(y * log2 x), with special cases for x == 0 and x < 0
+/ (negative base only valid for integral y; result sign follows parity)
+pow_real:
+	sub $16,sp
+	fstcw 8(sp)
+	movw 8(sp),%ax
+	andw $0xf3ff,%ax
+
+	fxch %st(1)
+
+	movw %ax,10(sp)
+
+	fcoml zero_real
+	fnstsw %ax
+	sahf
+	jz pow_zero
+	jc pow_negative
+
+pow_real_:
+	fyl2x
+	jmp exp2_real_
+
+pow_negative:
+/ round y down and keep the integer part in 12(sp) for the parity test;
+/ a non-integral y falls back to pow_real_ (yielding NaN)
+	fld %st(1)
+	fldcw 10(sp)
+	frndint
+	fistl 12(sp)
+	fldcw 8(sp)
+	fsub %st(2),%st
+
+	fcompl zero_real
+	fstsw %ax
+	sahf
+	jnz pow_real_
+
+	fchs
+	fyl2x
+
+	fld %st
+	fldcw 10(sp)
+	frndint
+	fldcw 8(sp)
+
+	fsubr %st,%st(1)
+	fxch %st(1)
+	f2xm1
+	faddl one_real
+	fscale
+
+/ odd integer exponent: negate the result
+	testl $1,12(sp)
+	fstp %st(1)
+	jz exponent_even
+	fchs
+exponent_even:
+	add $16,sp
+	ret
+
+pow_zero:
+	fld %st(1)
+	fcompl zero_real
+	fnstsw %ax
+	sahf
+	jbe pow_real_
+
+	fldl zero_real
+	fstp %st(1)
+	add $16,sp
+	ret
+
+/ double -> int conversions: temporarily set the x87 rounding control
+/ (0x0c00 = chop, 0x0400 = down/floor, 0x0800 = up/ceiling) and fistl
+truncate_real:
+	subl $8,sp
+	fstcw (sp)
+	movw (sp),%ax
+	orw $0x0c00,%ax
+	movw %ax,2(sp)
+	fldcw 2(sp)
+	fistl 4(sp)
+	fldcw (sp)
+	movl 4(sp),d0
+	addl $8,sp
+	ret
+
+entier_real:
+	subl $8,sp
+	fstcw (sp)
+	movw (sp),%ax
+	andw $0xf3ff,%ax
+	orw $0x0400,%ax
+	movw %ax,2(sp)
+	fldcw 2(sp)
+	fistl 4(sp)
+	fldcw (sp)
+	movl 4(sp),d0
+	addl $8,sp
+	ret
+
+ceiling_real:
+	subl $8,sp
+	fstcw (sp)
+	movw (sp),%ax
+	andw $0xf3ff,%ax
+	orw $0x0800,%ax
+	movw %ax,2(sp)
+	fldcw 2(sp)
+	fistl 4(sp)
+	fldcw (sp)
+	movl 4(sp),d0
+	addl $8,sp
+	ret
+
+/ double -> 64-bit int conversions: store via fistpll into the 8 bytes at
+/ 12(%ecx) (a0), using the rounding control as in the 32-bit versions
+/ above; all return 0.0 on the x87 stack
+round__real64:
+	fistpll 12(%ecx)
+	fldz
+	ret
+
+truncate__real64:
+	subl $4,sp
+	fstcw (sp)
+	movw (sp),%ax
+	orw $0x0c00,%ax
+	movw %ax,2(sp)
+	fldcw 2(sp)
+	fistpll 12(%ecx)
+	fldcw (sp)
+	addl $4,sp
+	fldz
+	ret
+
+entier__real64:
+	subl $4,sp
+	fstcw (sp)
+	movw (sp),%ax
+	andw $0xf3ff,%ax
+	orw $0x0400,%ax
+	movw %ax,2(sp)
+	fldcw 2(sp)
+	fistpll 12(%ecx)
+	fldcw (sp)
+	addl $4,sp
+	fldz
+	ret
+
+ceiling__real64:
+	subl $4,sp
+	fstcw (sp)
+	movw (sp),%ax
+	andw $0xf3ff,%ax
+	orw $0x0800,%ax
+	movw %ax,2(sp)
+	fldcw 2(sp)
+	fistpll 12(%ecx)
+	fldcw (sp)
+	addl $4,sp
+	fldz
+	ret
+
+/ load the 64-bit int at 12(%ecx) as a double
+int64a__to__real:
+	fildll 12(%ecx)
+	fstp %st(1)
+	ret
+
+/ C-callable wrappers: double arguments on the C stack
+@c_pow:
+	fldl 4(sp)
+	fldl 12(sp)
+	call pow_real
+	fstp %st(1)
+	ret
+
+@c_entier:
+	fldl 4(sp)
+	call entier_real
+	fstp %st(0)
+	ret
+#else
+/ clib branch: each routine spills its x87 argument(s) to the C stack,
+/ frees the whole x87 register stack (libc may use it freely) and calls
+/ the corresponding C library function, which returns its result in st(0).
+	section (tan_real)
+tan_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @tan
+	add $8,sp
+	ret
+
+	section (asin_real)
+asin_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @asin
+	add $8,sp
+	ret
+
+	section (acos_real)
+acos_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @acos
+	add $8,sp
+	ret
+
+	section (atan_real)
+atan_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @atan
+	add $8,sp
+	ret
+
+	section (ln_real)
+ln_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @log
+	add $8,sp
+	ret
+
+	section (log10_real)
+log10_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @log10
+	add $8,sp
+	ret
+
+	section (exp_real)
+exp_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @exp
+	add $8,sp
+	ret
+
+	section (pow_real)
+pow_real:
+	sub $16,sp
+	fstpl 8(sp)
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @pow
+	add $16,sp
+	ret
+
+	section (entier_real)
+entier_real:
+	sub $8,sp
+	fstpl (sp)
+	ffree %st(0)
+	ffree %st(1)
+	ffree %st(2)
+	ffree %st(3)
+	ffree %st(4)
+	ffree %st(5)
+	ffree %st(6)
+	ffree %st(7)
+	call @floor
+	add $8,sp
+/ NOTE(review): no ret here - entier_real appears to fall through the
+/ #endif into r_to_i_real below, converting floor's double result to an
+/ int in d0 like the NOCLIB version; confirm this is intentional
+#endif
+
+/ r_to_i_real: convert st(0) to a 32-bit int in d0 using the current x87
+/ rounding mode (st(0) is not popped: fistl stores without popping)
+r_to_i_real:
+	subl $4,sp
+	fistl (sp)
+	pop d0
+	ret
+
+#ifdef NEW_DESCRIPTORS
+# include "iap.s"
+#endif
+
+#include "ithread.s"
diff --git a/thread/ithread.s b/thread/ithread.s
new file mode 100644
index 0000000..d5285c9
--- /dev/null
+++ b/thread/ithread.s
@@ -0,0 +1,294 @@
+/
+/ File: ithread.s
+/ Author: John van Groningen
+/ Machine: Intel 386
+
+#define d0 %eax
+#define d1 %ebx
+#define a0 %ecx
+#define a1 %edx
+#define a2 %ebp
+#define a3 %esi
+#define a4 %edi
+#define sp %esp
+
+#define d0w %ax
+#define d1w %bx
+#define a0w %cx
+#define a1w %dx
+#define a2w %bp
+#define a3w %si
+#define a4w %di
+
+#define d0b %al
+#define d1b %bl
+#define a0b %cl
+#define a1b %dl
+
+#define d0lb %al
+#define d0hb %ah
+#define d1lb %bl
+#define d1hb %bh
+
+ .text
+
+ .globl @GetProcessHeap?0
+ .globl @HeapAlloc?12
+
+ .globl @clean_new_thread
+
+/ @clean_new_thread: Windows entry point for a new Clean thread.
+/ 4(sp) points to a parameter block: (p) = function to run, 4(p)/8(p) =
+/ heap size / A-stack size (0 = inherit from the main thread).
+@clean_new_thread:
+	call @GetProcessHeap?0
+
+/ allocate this thread's 256 byte thread-local block
+	pushl $256
+	pushl $0
+	push d0
+	call @HeapAlloc?12
+
+/ install it in the TLS slot (0x0e10 = TlsSlots array in the TEB)
+	movl d0,a4
+	movl tlsp_tls_index,d0
+	movl a4,%fs:0x0e10(,d0,4)
+
+	movl 4(sp),a0
+
+	movl 4(a0),d1
+	test d1,d1
+	jne clean_new_thread_1
+	movl main_thread_local_storage+heap_size_offset,d1
+clean_new_thread_1:
+	movl d1,heap_size_offset(a4)
+
+	movl 8(a0),d1
+	test d1,d1
+	jne clean_new_thread_2
+	movl main_thread_local_storage+a_stack_size_offset,d1
+clean_new_thread_2:
+	movl d1,a_stack_size_offset(a4)
+
+/ NOTE(review): init_thread returns 1 in d0 on allocation failure, but the
+/ result is not checked here - confirm whether that is intentional
+	call init_thread
+
+	movl a3,saved_a_stack_p_offset(a4)
+	movl sp,halt_sp_offset (a4)
+
+/ run the thread function, passing the parameter block
+	movl 4(sp),d0
+	push d0
+	call *(d0)
+	addl $4,sp
+
+/ re-fetch the thread-local block (a4 may have been clobbered) and clean up
+	movl tlsp_tls_index,a4
+	movl %fs:0x0e10(,a4,4),a4
+	jmp exit_thread
+
+/ init_thread: allocate and initialise this thread's heap and A-stack.
+/ in: a4 = thread-local block with heap_size/a_stack_size already set.
+/ out: d0 = 0 on success, 1 on allocation failure.
+init_thread:
+/ heap_size_33 = (heap size - alignment slack) / 33; presumably sizes the
+/ mark bit vector (1 bit per 32 heap bits) - TODO confirm
+	movl heap_size_offset(a4),d0
+#ifdef PREFETCH2
+	sub $63,d0
+#else
+	sub $3,d0
+#endif
+/ a1 (%edx) must be zeroed before the 64/32 div
+	xorl a1,a1
+	mov $33,d1
+	div d1
+	movl d0,heap_size_33_offset(a4)
+
+/ heap_size_129 sizes the copying collector's semispace bookkeeping;
+/ the copied-vector size is rounded up to a multiple of 4
+	movl heap_size_offset(a4),d0
+	sub $3,d0
+	xorl a1,a1
+	mov $129,d1
+	div d1
+	mov d0,heap_size_129_offset(a4)
+	add $3,d0
+	andl $-4,d0
+	movl d0,heap_copied_vector_size_offset(a4)
+	movl $0,heap_end_after_copy_gc_offset(a4)
+
+/ allocate the heap (+7 so it can be 4-aligned with room to spare)
+	movl heap_size_offset(a4),d0
+	add $7,d0
+
+	push d0
+#ifdef USE_CLIB
+	call @malloc
+#else
+	call @allocate_memory
+#endif
+	add $4,sp
+
+	test d0,d0
+	je init_thread_no_memory_2
+
+	mov d0,heap_mbp_offset(a4)
+	addl $3,d0
+	and $-4,d0
+	mov d0,free_heap_offset(a4)
+	mov d0,heap_p_offset(a4)
+
+/ allocate the A-stack, optionally with a guard page for overflow checks
+	movl a_stack_size_offset(a4),a2
+	add $3,a2
+
+	push a2
+#ifdef STACK_OVERFLOW_EXCEPTION_HANDLER
+	call @allocate_memory_with_guard_page_at_end
+#else
+# ifdef USE_CLIB
+	call @malloc
+# else
+	call @allocate_memory
+# endif
+#endif
+	add $4,sp
+
+	test d0,d0
+	je init_thread_no_memory_3
+
+	mov d0,stack_mbp_offset(a4)
+#ifdef STACK_OVERFLOW_EXCEPTION_HANDLER
+	addl a_stack_size_offset(a4),d0
+	addl $3+4095,d0
+	andl $-4096,d0
+	movl d0,a_stack_guard_page
+	subl a_stack_size_offset(a4),d0
+#endif
+	add $3,d0
+	andl $-4,d0
+
+/ a3 = A-stack pointer
+	mov d0,a3
+	mov d0,stack_p_offset(a4)
+
+/	lea caf_list+4,a0
+/	movl a0,caf_listp
+
+/ #ifdef FINALIZERS
+/	movl $__Nil-4,finalizer_list
+/	movl $__Nil-4,free_finalizer_list
+/ #endif
+
+/ lay out the heap halves and the copied vector for the copying collector
+	mov free_heap_offset(a4),a1
+	mov a1,heap_p1_offset(a4)
+
+	movl heap_size_129_offset(a4),a2
+	shl $4,a2
+	lea (a1,a2,4),d0
+	mov d0,heap_copied_vector_offset(a4)
+	add heap_copied_vector_size_offset(a4),d0
+	mov d0,heap_p2_offset(a4)
+
+	movb $0,garbage_collect_flag_offset(a4)
+
+# ifdef MARK_AND_COPY_GC
+/ bit 64 of @flags selects the copying collector at run time
+	testb $64,@flags
+	je init_thread_no_mark1
+# endif
+
+/ mark&sweep layout: reserve the bit vector at the start of the heap
+# if defined (MARK_GC) || defined (COMPACT_GC_ONLY)
+	movl heap_size_33_offset(a4),d0
+	movl a1,heap_vector_offset(a4)
+	addl d0,a1
+# ifdef PREFETCH2
+	addl $63,a1
+	andl $-64,a1
+# else
+	addl $3,a1
+	andl $-4,a1
+# endif
+	movl a1,free_heap_offset(a4)
+	movl a1,heap_p3_offset(a4)
+/ a2 = heap size in words for the marking heap
+	lea (,d0,8),a2
+	movb $-1,garbage_collect_flag_offset(a4)
+# endif
+
+# ifdef MARK_AND_COPY_GC
+init_thread_no_mark1:
+# endif
+
+/ optionally clamp the initial usable heap to @initial_heap_size
+# ifdef ADJUST_HEAP_SIZE
+	movl @initial_heap_size,d0
+# ifdef MARK_AND_COPY_GC
+	movl $(MINIMUM_HEAP_SIZE_2),d1
+	testb $64,@flags
+	jne init_thread_no_mark9
+	addl d1,d1
+init_thread_no_mark9:
+# else
+# if defined (MARK_GC) || defined (COMPACT_GC_ONLY)
+	movl $(MINIMUM_HEAP_SIZE),d1
+# else
+	movl $(MINIMUM_HEAP_SIZE_2),d1
+# endif
+# endif
+
+	cmpl d1,d0
+	jle init_thread_too_large_or_too_small
+	shr $2,d0
+	cmpl a2,d0
+	jge init_thread_too_large_or_too_small
+	movl d0,a2
+init_thread_too_large_or_too_small:
+# endif
+
+/ end_heap = end of usable heap minus the 32 byte allocation margin
+	lea (a1,a2,4),d0
+	mov d0,heap_end_after_gc_offset(a4)
+	subl $32,d0
+	movl d0,end_heap_offset(a4)
+
+# ifdef MARK_AND_COPY_GC
+	testb $64,@flags
+	je init_thread_no_mark2
+# endif
+
+# if defined (MARK_GC) && defined (ADJUST_HEAP_SIZE)
+	movl a2,bit_vector_size_offset(a4)
+# endif
+
+# ifdef MARK_AND_COPY_GC
+init_thread_no_mark2:
+# endif
+
+	movl $0,bit_counter_offset(a4)
+	movl $0,zero_bits_before_mark_offset(a4)
+
+/ success
+	xor %eax,%eax
+	ret
+
+init_thread_no_memory_2:
+	movl $1,%eax
+	ret
+
+/ A-stack allocation failed: release the heap before reporting failure
+init_thread_no_memory_3:
+	push heap_mbp_offset(a4)
+#ifdef USE_CLIB
+	call @free
+#else
+	call @free_memory
+#endif
+	add $4,sp
+
+	movl $1,%eax
+	ret
+
+/ exit_thread: tear down a Clean thread - record its execution time, free
+/ the A-stack and heap, then free the thread-local block via HeapFree.
+/ in: a4 = thread-local block
+exit_thread:
+	call add_execute_time
+
+	push stack_mbp_offset(a4)
+#ifdef USE_CLIB
+	call @free
+#else
+	call @free_memory
+#endif
+	add $4,sp
+
+	push heap_mbp_offset(a4)
+#ifdef USE_CLIB
+	call @free
+#else
+	call @free_memory
+#endif
+	add $4,sp
+
+	call @GetProcessHeap?0
+
+/ HeapFree(process heap, 0, thread-local block); stdcall pops the args
+	pushl a4
+	pushl $0
+	push d0
+	call @HeapFree?12
+
+	ret